[clang] 799270a - [RISCV] Add missing indexed load/store intrinsic tests for zvfbfmin (#148097)
via cfe-commits
cfe-commits at lists.llvm.org
Thu Jul 10 22:43:12 PDT 2025
Author: Jim Lin
Date: 2025-07-11T13:43:04+08:00
New Revision: 799270a807a6fec265388bd0efbac31f46b49a29
URL: https://github.com/llvm/llvm-project/commit/799270a807a6fec265388bd0efbac31f46b49a29
DIFF: https://github.com/llvm/llvm-project/commit/799270a807a6fec265388bd0efbac31f46b49a29.diff
LOG: [RISCV] Add missing indexed load/store intrinsic tests for zvfbfmin (#148097)
https://github.com/riscv-non-isa/rvv-intrinsic-doc/commit/d6d33a0e60fd234f26b79e4fcb00e6ace0b64b4c
adds the missing indexed load/store intrinsic tests for the other index sizes.
Added:
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg2ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg2ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg2ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg3ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg3ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg3ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg4ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg4ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg4ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg5ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg5ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg5ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg6ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg6ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg6ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg7ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg7ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg7ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg8ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg8ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg8ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg2ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg2ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg2ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg3ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg3ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg3ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg4ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg4ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg4ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg5ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg5ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg5ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg6ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg6ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg6ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg7ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg7ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg7ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg8ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg8ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg8ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg2ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg2ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg2ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg3ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg3ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg3ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg4ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg4ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg4ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg5ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg5ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg5ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg6ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg6ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg6ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg7ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg7ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg7ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg8ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg8ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg8ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg2ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg2ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg2ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg3ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg3ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg3ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg4ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg4ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg4ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg5ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg5ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg5ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg6ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg6ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg6ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg7ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg7ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg7ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg8ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg8ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg8ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg2ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg2ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg2ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg3ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg3ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg3ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg4ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg4ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg4ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg5ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg5ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg5ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg6ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg6ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg6ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg7ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg7ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg7ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg8ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg8ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg8ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg2ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg2ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg2ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg3ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg3ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg3ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg4ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg4ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg4ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg5ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg5ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg5ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg6ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg6ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg6ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg7ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg7ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg7ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg8ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg8ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg8ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg2ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg2ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg2ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg3ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg3ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg3ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg4ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg4ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg4ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg5ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg5ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg5ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg6ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg6ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg6ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg7ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg7ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg7ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg8ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg8ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg8ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg2ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg2ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg2ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg3ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg3ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg3ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg4ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg4ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg4ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg5ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg5ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg5ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg6ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg6ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg6ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg7ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg7ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg7ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg8ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg8ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg8ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg2ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg2ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg2ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg3ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg3ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg3ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg4ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg4ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg4ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg5ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg5ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg5ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg6ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg6ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg6ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg7ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg7ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg7ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg8ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg8ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg8ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg2ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg2ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg2ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg3ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg3ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg3ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg4ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg4ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg4ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg5ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg5ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg5ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg6ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg6ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg6ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg7ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg7ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg7ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg8ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg8ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg8ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg2ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg2ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg2ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg3ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg3ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg3ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg4ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg4ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg4ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg5ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg5ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg5ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg6ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg6ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg6ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg7ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg7ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg7ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg8ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg8ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg8ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg2ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg2ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg2ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg3ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg3ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg3ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg4ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg4ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg4ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg5ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg5ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg5ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg6ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg6ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg6ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg7ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg7ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg7ei8.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg8ei32.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg8ei64.c
clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg8ei8.c
Modified:
Removed:
################################################################################
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxei32.c
new file mode 100644
index 0000000000000..7bf0a4e5b7b1f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxei32.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei32_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei32_v_bf16mf4(const __bf16 *rs1, vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_v_bf16mf4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei32_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei32_v_bf16mf2(const __bf16 *rs1, vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_v_bf16mf2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei32_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei32_v_bf16m1(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_v_bf16m1(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei32_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei32_v_bf16m2(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_v_bf16m2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei32_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei32_v_bf16m4(const __bf16 *rs1, vuint32m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_v_bf16m4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei32_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei32_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16mf4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei32_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei32_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16mf2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei32_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei32_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m1_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei32_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei32_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei32_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei32_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m4_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxei64.c
new file mode 100644
index 0000000000000..be42373070f9d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei64_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei64_v_bf16mf4(const __bf16 *rs1, vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_v_bf16mf4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei64_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei64_v_bf16mf2(const __bf16 *rs1, vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_v_bf16mf2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei64_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei64_v_bf16m1(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_v_bf16m1(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei64_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei64_v_bf16m2(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_v_bf16m2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei64_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei64_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16mf4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei64_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei64_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16mf2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei64_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei64_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16m1_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei64_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei64_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16m2_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxei8.c
new file mode 100644
index 0000000000000..767405acfde03
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxei8.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei8_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei8_v_bf16mf4(const __bf16 *rs1, vuint8mf8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_v_bf16mf4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei8_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei8_v_bf16mf2(const __bf16 *rs1, vuint8mf4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_v_bf16mf2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei8_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei8_v_bf16m1(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_v_bf16m1(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei8_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei8_v_bf16m2(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_v_bf16m2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei8_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei8_v_bf16m4(const __bf16 *rs1, vuint8m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_v_bf16m4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vloxei8_v_bf16m8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vloxei.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> poison, ptr [[RS1]], <vscale x 32 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vloxei8_v_bf16m8(const __bf16 *rs1, vuint8m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_v_bf16m8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei8_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei8_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16mf4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei8_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei8_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16mf2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei8_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei8_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m1_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei8_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei8_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei8_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei8_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vloxei8_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vloxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> poison, ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vloxei8_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1,
+ vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m8_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg2ei32.c
new file mode 100644
index 0000000000000..adafe97dff8b8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg2ei32.c
@@ -0,0 +1,120 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei32_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei32_v_bf16mf4x2(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16mf4x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei32_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei32_v_bf16mf2x2(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16mf2x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei32_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei32_v_bf16m1x2(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m1x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei32_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei32_v_bf16m2x2(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m2x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei32_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei32_v_bf16m4x2(const __bf16 *rs1, vuint32m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m4x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei32_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei32_v_bf16mf4x2_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16mf4x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei32_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei32_v_bf16mf2x2_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16mf2x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei32_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei32_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m1x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei32_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei32_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m2x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei32_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei32_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m4x2_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg2ei64.c
new file mode 100644
index 0000000000000..0be1d3fb19ae8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg2ei64.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei64_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei64_v_bf16mf4x2(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16mf4x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei64_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei64_v_bf16mf2x2(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16mf2x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei64_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei64_v_bf16m1x2(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16m1x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei64_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei64_v_bf16m2x2(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16m2x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei64_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei64_v_bf16mf4x2_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16mf4x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei64_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei64_v_bf16mf2x2_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16mf2x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei64_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei64_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16m1x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei64_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei64_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16m2x2_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg2ei8.c
new file mode 100644
index 0000000000000..8a6d93d429dff
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg2ei8.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei8_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei8_v_bf16mf4x2(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16mf4x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei8_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei8_v_bf16mf2x2(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16mf2x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei8_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei8_v_bf16m1x2(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m1x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei8_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei8_v_bf16m2x2(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m2x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei8_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei8_v_bf16m4x2(const __bf16 *rs1, vuint8m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m4x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei8_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei8_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16mf4x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei8_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei8_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16mf2x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei8_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei8_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m1x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei8_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei8_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m2x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei8_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei8_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m4x2_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg3ei32.c
new file mode 100644
index 0000000000000..00d0043a5e81b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg3ei32.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei32_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei32_v_bf16mf4x3(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16mf4x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei32_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei32_v_bf16mf2x3(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16mf2x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei32_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei32_v_bf16m1x3(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16m1x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei32_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei32_v_bf16m2x3(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16m2x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei32_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei32_v_bf16mf4x3_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16mf4x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei32_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei32_v_bf16mf2x3_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16mf2x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei32_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei32_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16m1x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei32_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei32_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16m2x3_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg3ei64.c
new file mode 100644
index 0000000000000..0685d0cab692d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg3ei64.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei64_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei64_v_bf16mf4x3(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16mf4x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei64_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei64_v_bf16mf2x3(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16mf2x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei64_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei64_v_bf16m1x3(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16m1x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei64_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei64_v_bf16m2x3(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16m2x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei64_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei64_v_bf16mf4x3_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16mf4x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei64_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei64_v_bf16mf2x3_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16mf2x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei64_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei64_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16m1x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei64_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei64_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16m2x3_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg3ei8.c
new file mode 100644
index 0000000000000..b68c5f56a65b8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg3ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei8_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei8_v_bf16mf4x3(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16mf4x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei8_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei8_v_bf16mf2x3(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16mf2x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei8_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei8_v_bf16m1x3(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16m1x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei8_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei8_v_bf16m2x3(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16m2x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei8_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei8_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16mf4x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei8_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei8_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16mf2x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei8_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei8_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16m1x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei8_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei8_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16m2x3_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg4ei32.c
new file mode 100644
index 0000000000000..0f65998c8f30b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg4ei32.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei32_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei32_v_bf16mf4x4(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16mf4x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei32_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei32_v_bf16mf2x4(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16mf2x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei32_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei32_v_bf16m1x4(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16m1x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei32_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei32_v_bf16m2x4(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16m2x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei32_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei32_v_bf16mf4x4_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16mf4x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei32_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei32_v_bf16mf2x4_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16mf2x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei32_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei32_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16m1x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei32_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei32_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16m2x4_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg4ei64.c
new file mode 100644
index 0000000000000..9c120fc68fa82
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg4ei64.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei64_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei64_v_bf16mf4x4(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16mf4x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei64_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei64_v_bf16mf2x4(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16mf2x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei64_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei64_v_bf16m1x4(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16m1x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei64_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei64_v_bf16m2x4(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16m2x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei64_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei64_v_bf16mf4x4_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16mf4x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei64_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei64_v_bf16mf2x4_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16mf2x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei64_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei64_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16m1x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei64_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei64_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16m2x4_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg4ei8.c
new file mode 100644
index 0000000000000..f7011650f9ed5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg4ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei8_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei8_v_bf16mf4x4(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16mf4x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei8_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei8_v_bf16mf2x4(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16mf2x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei8_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei8_v_bf16m1x4(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16m1x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei8_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei8_v_bf16m2x4(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16m2x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei8_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei8_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16mf4x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei8_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei8_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16mf2x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei8_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei8_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16m1x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei8_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei8_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16m2x4_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg5ei32.c
new file mode 100644
index 0000000000000..c100047f765c0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg5ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei32_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei32_v_bf16mf4x5(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16mf4x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei32_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei32_v_bf16mf2x5(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16mf2x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei32_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei32_v_bf16m1x5(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16m1x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei32_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei32_v_bf16mf4x5_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16mf4x5_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei32_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei32_v_bf16mf2x5_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16mf2x5_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei32_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei32_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16m1x5_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg5ei64.c
new file mode 100644
index 0000000000000..75342b035c3a2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg5ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei64_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei64_v_bf16mf4x5(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16mf4x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei64_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei64_v_bf16mf2x5(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16mf2x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei64_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei64_v_bf16m1x5(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16m1x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei64_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei64_v_bf16mf4x5_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16mf4x5_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei64_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei64_v_bf16mf2x5_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16mf2x5_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei64_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei64_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16m1x5_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg5ei8.c
new file mode 100644
index 0000000000000..0e0b2ab502669
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg5ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei8_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei8_v_bf16mf4x5(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16mf4x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei8_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei8_v_bf16mf2x5(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16mf2x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei8_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei8_v_bf16m1x5(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16m1x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei8_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei8_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16mf4x5_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei8_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei8_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16mf2x5_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei8_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei8_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16m1x5_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg6ei32.c
new file mode 100644
index 0000000000000..5d686a64b3ff6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg6ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei32_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei32_v_bf16mf4x6(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16mf4x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei32_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei32_v_bf16mf2x6(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16mf2x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei32_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei32_v_bf16m1x6(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16m1x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei32_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei32_v_bf16mf4x6_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16mf4x6_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei32_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei32_v_bf16mf2x6_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16mf2x6_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei32_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei32_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16m1x6_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg6ei64.c
new file mode 100644
index 0000000000000..180e0e3946c57
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg6ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei64_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei64_v_bf16mf4x6(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16mf4x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei64_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei64_v_bf16mf2x6(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16mf2x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei64_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei64_v_bf16m1x6(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16m1x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei64_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei64_v_bf16mf4x6_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16mf4x6_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei64_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei64_v_bf16mf2x6_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16mf2x6_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei64_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei64_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16m1x6_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg6ei8.c
new file mode 100644
index 0000000000000..cb25709918f66
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg6ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei8_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei8_v_bf16mf4x6(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16mf4x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei8_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei8_v_bf16mf2x6(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16mf2x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei8_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei8_v_bf16m1x6(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16m1x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei8_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei8_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16mf4x6_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei8_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei8_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16mf2x6_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei8_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei8_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16m1x6_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg7ei32.c
new file mode 100644
index 0000000000000..a9c095a02ee80
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg7ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei32_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei32_v_bf16mf4x7(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16mf4x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei32_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei32_v_bf16mf2x7(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16mf2x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei32_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei32_v_bf16m1x7(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16m1x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei32_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei32_v_bf16mf4x7_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16mf4x7_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei32_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei32_v_bf16mf2x7_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16mf2x7_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei32_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei32_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16m1x7_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg7ei64.c
new file mode 100644
index 0000000000000..89d8f75f74b0b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg7ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei64_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei64_v_bf16mf4x7(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16mf4x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei64_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei64_v_bf16mf2x7(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16mf2x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei64_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei64_v_bf16m1x7(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16m1x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei64_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei64_v_bf16mf4x7_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16mf4x7_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei64_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei64_v_bf16mf2x7_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16mf2x7_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei64_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei64_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16m1x7_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg7ei8.c
new file mode 100644
index 0000000000000..e4aeaadd629c0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg7ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei8_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei8_v_bf16mf4x7(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16mf4x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei8_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei8_v_bf16mf2x7(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16mf2x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei8_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei8_v_bf16m1x7(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16m1x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei8_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei8_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16mf4x7_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei8_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei8_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16mf2x7_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei8_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei8_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16m1x7_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg8ei32.c
new file mode 100644
index 0000000000000..552880c5d24b7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg8ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei32_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei32_v_bf16mf4x8(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16mf4x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei32_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei32_v_bf16mf2x8(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16mf2x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei32_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei32_v_bf16m1x8(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16m1x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei32_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei32_v_bf16mf4x8_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16mf4x8_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei32_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei32_v_bf16mf2x8_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16mf2x8_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei32_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei32_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16m1x8_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg8ei64.c
new file mode 100644
index 0000000000000..f57756c4afb3c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg8ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei64_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei64_v_bf16mf4x8(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16mf4x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei64_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei64_v_bf16mf2x8(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16mf2x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei64_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei64_v_bf16m1x8(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16m1x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei64_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei64_v_bf16mf4x8_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16mf4x8_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei64_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei64_v_bf16mf2x8_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16mf2x8_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei64_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei64_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16m1x8_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg8ei8.c
new file mode 100644
index 0000000000000..6ae89de151b40
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vloxseg8ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei8_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei8_v_bf16mf4x8(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16mf4x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei8_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei8_v_bf16mf2x8(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16mf2x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei8_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei8_v_bf16m1x8(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16m1x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei8_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei8_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16mf4x8_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei8_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei8_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16mf2x8_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei8_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei8_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16m1x8_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxei32.c
new file mode 100644
index 0000000000000..44a77b981ceb6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxei32.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei32_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei32_v_bf16mf4(const __bf16 *rs1, vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_v_bf16mf4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei32_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei32_v_bf16mf2(const __bf16 *rs1, vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_v_bf16mf2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei32_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei32_v_bf16m1(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_v_bf16m1(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei32_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei32_v_bf16m2(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_v_bf16m2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei32_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei32_v_bf16m4(const __bf16 *rs1, vuint32m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_v_bf16m4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei32_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei32_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16mf4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei32_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei32_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16mf2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei32_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei32_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m1_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei32_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei32_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei32_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei32_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m4_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxei64.c
new file mode 100644
index 0000000000000..67645b8ba5cb7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei64_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei64_v_bf16mf4(const __bf16 *rs1, vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_v_bf16mf4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei64_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei64_v_bf16mf2(const __bf16 *rs1, vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_v_bf16mf2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei64_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei64_v_bf16m1(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_v_bf16m1(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei64_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei64_v_bf16m2(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_v_bf16m2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei64_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei64_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16mf4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei64_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei64_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16mf2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei64_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei64_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16m1_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei64_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei64_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16m2_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxei8.c
new file mode 100644
index 0000000000000..f7ac2be80e08d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxei8.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei8_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei8_v_bf16mf4(const __bf16 *rs1, vuint8mf8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_v_bf16mf4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei8_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei8_v_bf16mf2(const __bf16 *rs1, vuint8mf4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_v_bf16mf2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei8_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei8_v_bf16m1(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_v_bf16m1(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei8_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei8_v_bf16m2(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_v_bf16m2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei8_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei8_v_bf16m4(const __bf16 *rs1, vuint8m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_v_bf16m4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vluxei8_v_bf16m8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vluxei.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> poison, ptr [[RS1]], <vscale x 32 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vluxei8_v_bf16m8(const __bf16 *rs1, vuint8m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_v_bf16m8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei8_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei8_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16mf4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei8_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei8_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16mf2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei8_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei8_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m1_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei8_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei8_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei8_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei8_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vluxei8_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vluxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> poison, ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vluxei8_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1,
+ vuint8m4_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m8_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg2ei32.c
new file mode 100644
index 0000000000000..e570c09397e3d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg2ei32.c
@@ -0,0 +1,120 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei32_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei32_v_bf16mf4x2(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16mf4x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei32_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei32_v_bf16mf2x2(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16mf2x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei32_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei32_v_bf16m1x2(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m1x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei32_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei32_v_bf16m2x2(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m2x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei32_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei32_v_bf16m4x2(const __bf16 *rs1, vuint32m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m4x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei32_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei32_v_bf16mf4x2_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16mf4x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei32_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei32_v_bf16mf2x2_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16mf2x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei32_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei32_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m1x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei32_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei32_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m2x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei32_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei32_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m4x2_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg2ei64.c
new file mode 100644
index 0000000000000..cc806af77d848
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg2ei64.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei64_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei64_v_bf16mf4x2(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16mf4x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei64_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei64_v_bf16mf2x2(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16mf2x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei64_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei64_v_bf16m1x2(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16m1x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei64_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei64_v_bf16m2x2(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16m2x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei64_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei64_v_bf16mf4x2_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16mf4x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei64_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei64_v_bf16mf2x2_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16mf2x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei64_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei64_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16m1x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei64_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei64_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16m2x2_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg2ei8.c
new file mode 100644
index 0000000000000..871567122ce16
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg2ei8.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei8_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei8_v_bf16mf4x2(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16mf4x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei8_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei8_v_bf16mf2x2(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16mf2x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei8_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei8_v_bf16m1x2(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m1x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei8_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei8_v_bf16m2x2(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m2x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei8_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei8_v_bf16m4x2(const __bf16 *rs1, vuint8m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m4x2(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei8_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei8_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16mf4x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei8_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei8_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16mf2x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei8_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei8_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m1x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei8_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei8_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m2x2_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei8_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei8_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m4x2_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg3ei32.c
new file mode 100644
index 0000000000000..770d7f2c255f8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg3ei32.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei32_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei32_v_bf16mf4x3(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16mf4x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei32_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei32_v_bf16mf2x3(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16mf2x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei32_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei32_v_bf16m1x3(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16m1x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei32_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei32_v_bf16m2x3(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16m2x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei32_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei32_v_bf16mf4x3_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16mf4x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei32_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei32_v_bf16mf2x3_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16mf2x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei32_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei32_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16m1x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei32_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei32_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16m2x3_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg3ei64.c
new file mode 100644
index 0000000000000..505b2096aaf13
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg3ei64.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei64_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei64_v_bf16mf4x3(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16mf4x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei64_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei64_v_bf16mf2x3(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16mf2x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei64_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei64_v_bf16m1x3(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16m1x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei64_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei64_v_bf16m2x3(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16m2x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei64_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei64_v_bf16mf4x3_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16mf4x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei64_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei64_v_bf16mf2x3_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16mf2x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei64_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei64_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16m1x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei64_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei64_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16m2x3_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg3ei8.c
new file mode 100644
index 0000000000000..708f28667f8ab
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg3ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei8_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei8_v_bf16mf4x3(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16mf4x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei8_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei8_v_bf16mf2x3(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16mf2x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei8_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei8_v_bf16m1x3(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16m1x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei8_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei8_v_bf16m2x3(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16m2x3(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei8_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei8_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16mf4x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei8_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei8_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16mf2x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei8_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei8_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16m1x3_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei8_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei8_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16m2x3_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg4ei32.c
new file mode 100644
index 0000000000000..7c300c57ad9ae
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg4ei32.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei32_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei32_v_bf16mf4x4(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16mf4x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei32_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei32_v_bf16mf2x4(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16mf2x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei32_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei32_v_bf16m1x4(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16m1x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei32_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei32_v_bf16m2x4(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16m2x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei32_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei32_v_bf16mf4x4_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16mf4x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei32_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei32_v_bf16mf2x4_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16mf2x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei32_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei32_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16m1x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei32_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei32_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16m2x4_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg4ei64.c
new file mode 100644
index 0000000000000..db177f38000ed
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg4ei64.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei64_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei64_v_bf16mf4x4(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16mf4x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei64_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei64_v_bf16mf2x4(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16mf2x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei64_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei64_v_bf16m1x4(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16m1x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei64_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei64_v_bf16m2x4(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16m2x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei64_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei64_v_bf16mf4x4_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16mf4x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei64_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei64_v_bf16mf2x4_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16mf2x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei64_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei64_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16m1x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei64_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei64_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16m2x4_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg4ei8.c
new file mode 100644
index 0000000000000..fa52ffcb0a53f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg4ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei8_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei8_v_bf16mf4x4(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16mf4x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei8_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei8_v_bf16mf2x4(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16mf2x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei8_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei8_v_bf16m1x4(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16m1x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei8_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei8_v_bf16m2x4(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16m2x4(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei8_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei8_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16mf4x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei8_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei8_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16mf2x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei8_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei8_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16m1x4_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei8_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei8_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16m2x4_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg5ei32.c
new file mode 100644
index 0000000000000..393cc317cef38
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg5ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei32_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei32_v_bf16mf4x5(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16mf4x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei32_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei32_v_bf16mf2x5(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16mf2x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei32_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei32_v_bf16m1x5(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16m1x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei32_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei32_v_bf16mf4x5_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16mf4x5_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei32_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei32_v_bf16mf2x5_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16mf2x5_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei32_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei32_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16m1x5_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg5ei64.c
new file mode 100644
index 0000000000000..8048dbed774bf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg5ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei64_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei64_v_bf16mf4x5(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16mf4x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei64_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei64_v_bf16mf2x5(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16mf2x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei64_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei64_v_bf16m1x5(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16m1x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei64_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei64_v_bf16mf4x5_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16mf4x5_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei64_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei64_v_bf16mf2x5_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16mf2x5_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei64_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei64_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16m1x5_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg5ei8.c
new file mode 100644
index 0000000000000..8fc02319cfba5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg5ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei8_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei8_v_bf16mf4x5(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16mf4x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei8_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei8_v_bf16mf2x5(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16mf2x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei8_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei8_v_bf16m1x5(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16m1x5(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei8_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei8_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16mf4x5_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei8_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei8_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16mf2x5_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei8_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei8_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16m1x5_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg6ei32.c
new file mode 100644
index 0000000000000..a5f680c8b7662
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg6ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei32_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei32_v_bf16mf4x6(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16mf4x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei32_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei32_v_bf16mf2x6(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16mf2x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei32_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei32_v_bf16m1x6(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16m1x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei32_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei32_v_bf16mf4x6_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16mf4x6_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei32_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei32_v_bf16mf2x6_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16mf2x6_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei32_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei32_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16m1x6_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg6ei64.c
new file mode 100644
index 0000000000000..50a5933e228ac
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg6ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei64_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei64_v_bf16mf4x6(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16mf4x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei64_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei64_v_bf16mf2x6(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16mf2x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei64_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei64_v_bf16m1x6(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16m1x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei64_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei64_v_bf16mf4x6_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16mf4x6_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei64_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei64_v_bf16mf2x6_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16mf2x6_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei64_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei64_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16m1x6_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg6ei8.c
new file mode 100644
index 0000000000000..8684080d2d362
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg6ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei8_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei8_v_bf16mf4x6(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16mf4x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei8_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei8_v_bf16mf2x6(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16mf2x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei8_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei8_v_bf16m1x6(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16m1x6(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei8_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei8_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16mf4x6_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei8_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei8_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16mf2x6_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei8_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei8_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16m1x6_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg7ei32.c
new file mode 100644
index 0000000000000..6bdcf10de0d34
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg7ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei32_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei32_v_bf16mf4x7(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16mf4x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei32_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei32_v_bf16mf2x7(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16mf2x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei32_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei32_v_bf16m1x7(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16m1x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei32_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei32_v_bf16mf4x7_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16mf4x7_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei32_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei32_v_bf16mf2x7_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16mf2x7_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei32_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei32_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16m1x7_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg7ei64.c
new file mode 100644
index 0000000000000..7cdd26a6aa481
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg7ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei64_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei64_v_bf16mf4x7(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16mf4x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei64_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei64_v_bf16mf2x7(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16mf2x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei64_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei64_v_bf16m1x7(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16m1x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei64_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei64_v_bf16mf4x7_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16mf4x7_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei64_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei64_v_bf16mf2x7_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16mf2x7_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei64_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei64_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16m1x7_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg7ei8.c
new file mode 100644
index 0000000000000..6ae6bd3e631d8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg7ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei8_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei8_v_bf16mf4x7(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16mf4x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei8_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei8_v_bf16mf2x7(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16mf2x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei8_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei8_v_bf16m1x7(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16m1x7(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei8_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei8_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16mf4x7_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei8_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei8_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16mf2x7_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei8_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei8_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16m1x7_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg8ei32.c
new file mode 100644
index 0000000000000..bbcae0625be42
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg8ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei32_v_bf16mf4x8(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16mf4x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei32_v_bf16mf2x8(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16mf2x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei32_v_bf16m1x8(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16m1x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei32_v_bf16mf4x8_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16mf4x8_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei32_v_bf16mf2x8_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16mf2x8_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei32_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16m1x8_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg8ei64.c
new file mode 100644
index 0000000000000..42b3365d1116f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg8ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei64_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei64_v_bf16mf4x8(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16mf4x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei64_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei64_v_bf16mf2x8(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16mf2x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei64_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei64_v_bf16m1x8(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16m1x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei64_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei64_v_bf16mf4x8_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16mf4x8_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei64_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei64_v_bf16mf2x8_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16mf2x8_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei64_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei64_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16m1x8_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg8ei8.c
new file mode 100644
index 0000000000000..a50bb4cedc6d6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vluxseg8ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei8_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei8_v_bf16mf4x8(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16mf4x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei8_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei8_v_bf16mf2x8(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16mf2x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei8_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei8_v_bf16m1x8(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16m1x8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei8_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei8_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16mf4x8_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei8_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei8_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16mf2x8_m(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei8_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei8_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16m1x8_m(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxei32.c
new file mode 100644
index 0000000000000..775a28a4fe1fd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxei32.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16mf4(__bf16 *rs1, vuint32mf2_t rs2, vbfloat16mf4_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei32_v_bf16mf4(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16mf2(__bf16 *rs1, vuint32m1_t rs2, vbfloat16mf2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei32_v_bf16mf2(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16m1(__bf16 *rs1, vuint32m2_t rs2, vbfloat16m1_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei32_v_bf16m1(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16m2(__bf16 *rs1, vuint32m4_t rs2, vbfloat16m2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei32_v_bf16m2(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16m4(__bf16 *rs1, vuint32m8_t rs2, vbfloat16m4_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei32_v_bf16m4(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint32mf2_t rs2,
+ vbfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsoxei32_v_bf16mf4_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t rs2,
+ vbfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsoxei32_v_bf16mf2_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t rs2,
+ vbfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsoxei32_v_bf16m1_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t rs2,
+ vbfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsoxei32_v_bf16m2_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint32m8_t rs2,
+ vbfloat16m4_t vs3, size_t vl) {
+ return __riscv_vsoxei32_v_bf16m4_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxei64.c
new file mode 100644
index 0000000000000..d69aa335b05b5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16mf4(__bf16 *rs1, vuint64m1_t rs2, vbfloat16mf4_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei64_v_bf16mf4(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16mf2(__bf16 *rs1, vuint64m2_t rs2, vbfloat16mf2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei64_v_bf16mf2(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16m1(__bf16 *rs1, vuint64m4_t rs2, vbfloat16m1_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei64_v_bf16m1(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16m2(__bf16 *rs1, vuint64m8_t rs2, vbfloat16m2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei64_v_bf16m2(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t rs2,
+ vbfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsoxei64_v_bf16mf4_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t rs2,
+ vbfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsoxei64_v_bf16mf2_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t rs2,
+ vbfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsoxei64_v_bf16m1_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t rs2,
+ vbfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsoxei64_v_bf16m2_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxei8.c
new file mode 100644
index 0000000000000..e0e8376297555
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxei8.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16mf4(__bf16 *rs1, vuint8mf8_t rs2, vbfloat16mf4_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei8_v_bf16mf4(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16mf2(__bf16 *rs1, vuint8mf4_t rs2, vbfloat16mf2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei8_v_bf16mf2(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m1(__bf16 *rs1, vuint8mf2_t rs2, vbfloat16m1_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei8_v_bf16m1(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m2(__bf16 *rs1, vuint8m1_t rs2, vbfloat16m2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei8_v_bf16m2(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m4(__bf16 *rs1, vuint8m2_t rs2, vbfloat16m4_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei8_v_bf16m4(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], <vscale x 32 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m8(__bf16 *rs1, vuint8m4_t rs2, vbfloat16m8_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei8_v_bf16m8(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t rs2,
+ vbfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsoxei8_v_bf16mf4_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t rs2,
+ vbfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsoxei8_v_bf16mf2_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t rs2,
+ vbfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsoxei8_v_bf16m1_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t rs2,
+ vbfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsoxei8_v_bf16m2_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint8m2_t rs2,
+ vbfloat16m4_t vs3, size_t vl) {
+ return __riscv_vsoxei8_v_bf16m4_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], <vscale x 32 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint8m4_t rs2,
+ vbfloat16m8_t vs3, size_t vl) {
+ return __riscv_vsoxei8_v_bf16m8_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg2ei32.c
new file mode 100644
index 0000000000000..560d303933a90
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg2ei32.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16mf4x2(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32_v_bf16mf4x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16mf2x2(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32_v_bf16mf2x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16m1x2(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32_v_bf16m1x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16m2x2(__bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32_v_bf16m2x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16m4x2(__bf16 *rs1, vuint32m8_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32_v_bf16m4x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg2ei32_v_bf16mf4x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32_v_bf16mf2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32_v_bf16m1x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32_v_bf16m2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint32m8_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32_v_bf16m4x2_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg2ei64.c
new file mode 100644
index 0000000000000..41debc8613635
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg2ei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16mf4x2(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64_v_bf16mf4x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16mf2x2(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64_v_bf16mf2x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16m1x2(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64_v_bf16m1x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16m2x2(__bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64_v_bf16m2x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64_v_bf16mf4x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64_v_bf16mf2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64_v_bf16m1x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64_v_bf16m2x2_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg2ei8.c
new file mode 100644
index 0000000000000..55b3ff48537a4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg2ei8.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16mf4x2(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8_v_bf16mf4x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16mf2x2(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8_v_bf16mf2x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16m1x2(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8_v_bf16m1x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16m2x2(__bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8_v_bf16m2x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16m4x2(__bf16 *rs1, vuint8m2_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8_v_bf16m4x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8_v_bf16mf4x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8_v_bf16mf2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8_v_bf16m1x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8_v_bf16m2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint8m2_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8_v_bf16m4x2_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg3ei32.c
new file mode 100644
index 0000000000000..9da13af17a33f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg3ei32.c
@@ -0,0 +1,97 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16mf4x3(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei32_v_bf16mf4x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16mf2x3(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei32_v_bf16mf2x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16m1x3(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei32_v_bf16m1x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16m2x3(__bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei32_v_bf16m2x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x3_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg3ei32_v_bf16mf4x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei32_v_bf16mf2x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei32_v_bf16m1x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei32_v_bf16m2x3_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg3ei64.c
new file mode 100644
index 0000000000000..f8e974cff66cd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg3ei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16mf4x3(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_bf16mf4x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16mf2x3(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_bf16mf2x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16m1x3(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_bf16m1x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16m2x3(__bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_bf16m2x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_bf16mf4x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_bf16mf2x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_bf16m1x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64_v_bf16m2x3_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg3ei8.c
new file mode 100644
index 0000000000000..d80f01903e5c0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg3ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16mf4x3(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8_v_bf16mf4x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16mf2x3(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8_v_bf16mf2x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16m1x3(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8_v_bf16m1x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16m2x3(__bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8_v_bf16m2x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8_v_bf16mf4x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8_v_bf16mf2x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8_v_bf16m1x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8_v_bf16m2x3_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg4ei32.c
new file mode 100644
index 0000000000000..dec74165c9e87
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg4ei32.c
@@ -0,0 +1,97 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16mf4x4(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_bf16mf4x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16mf2x4(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_bf16mf2x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16m1x4(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_bf16m1x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16m2x4(__bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_bf16m2x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x4_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg4ei32_v_bf16mf4x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_bf16mf2x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_bf16m1x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32_v_bf16m2x4_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg4ei64.c
new file mode 100644
index 0000000000000..9b042b97ba547
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg4ei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16mf4x4(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_bf16mf4x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16mf2x4(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_bf16mf2x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16m1x4(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_bf16m1x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16m2x4(__bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_bf16m2x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_bf16mf4x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_bf16mf2x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_bf16m1x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64_v_bf16m2x4_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg4ei8.c
new file mode 100644
index 0000000000000..5c1ec6cbfd3ff
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg4ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16mf4x4(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_bf16mf4x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16mf2x4(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_bf16mf2x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16m1x4(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_bf16m1x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16m2x4(__bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_bf16m2x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_bf16mf4x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_bf16mf2x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_bf16m1x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8_v_bf16m2x4_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg5ei32.c
new file mode 100644
index 0000000000000..e278c9da207ec
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg5ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei32_v_bf16mf4x5(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei32_v_bf16mf4x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei32_v_bf16mf2x5(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei32_v_bf16mf2x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei32_v_bf16m1x5(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei32_v_bf16m1x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei32_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x5_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg5ei32_v_bf16mf4x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei32_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei32_v_bf16mf2x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei32_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei32_v_bf16m1x5_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg5ei64.c
new file mode 100644
index 0000000000000..1439ab40b5be8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg5ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei64_v_bf16mf4x5(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei64_v_bf16mf4x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei64_v_bf16mf2x5(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei64_v_bf16mf2x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei64_v_bf16m1x5(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei64_v_bf16m1x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei64_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei64_v_bf16mf4x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei64_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei64_v_bf16mf2x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei64_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei64_v_bf16m1x5_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg5ei8.c
new file mode 100644
index 0000000000000..1b72716f25088
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg5ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei8_v_bf16mf4x5(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei8_v_bf16mf4x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei8_v_bf16mf2x5(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei8_v_bf16mf2x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei8_v_bf16m1x5(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei8_v_bf16m1x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei8_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei8_v_bf16mf4x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei8_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei8_v_bf16mf2x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei8_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei8_v_bf16m1x5_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg6ei32.c
new file mode 100644
index 0000000000000..7c659d353fdc4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg6ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei32_v_bf16mf4x6(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_bf16mf4x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei32_v_bf16mf2x6(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_bf16mf2x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei32_v_bf16m1x6(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_bf16m1x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei32_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x6_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg6ei32_v_bf16mf4x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei32_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_bf16mf2x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei32_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei32_v_bf16m1x6_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg6ei64.c
new file mode 100644
index 0000000000000..b15b31e58fd01
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg6ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei64_v_bf16mf4x6(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei64_v_bf16mf4x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei64_v_bf16mf2x6(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei64_v_bf16mf2x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei64_v_bf16m1x6(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei64_v_bf16m1x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei64_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei64_v_bf16mf4x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei64_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei64_v_bf16mf2x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei64_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei64_v_bf16m1x6_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg6ei8.c
new file mode 100644
index 0000000000000..a18dc0cdc31cf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg6ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei8_v_bf16mf4x6(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei8_v_bf16mf4x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei8_v_bf16mf2x6(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei8_v_bf16mf2x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei8_v_bf16m1x6(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei8_v_bf16m1x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei8_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei8_v_bf16mf4x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei8_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei8_v_bf16mf2x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei8_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei8_v_bf16m1x6_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg7ei32.c
new file mode 100644
index 0000000000000..6e41b5491682e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg7ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei32_v_bf16mf4x7(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_bf16mf4x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei32_v_bf16mf2x7(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_bf16mf2x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei32_v_bf16m1x7(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_bf16m1x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei32_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x7_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg7ei32_v_bf16mf4x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei32_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_bf16mf2x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei32_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32_v_bf16m1x7_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg7ei64.c
new file mode 100644
index 0000000000000..ee8cedf02225f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg7ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei64_v_bf16mf4x7(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_bf16mf4x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei64_v_bf16mf2x7(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_bf16mf2x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei64_v_bf16m1x7(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_bf16m1x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei64_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_bf16mf4x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei64_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_bf16mf2x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei64_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64_v_bf16m1x7_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg7ei8.c
new file mode 100644
index 0000000000000..ed07ca1803b75
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg7ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei8_v_bf16mf4x7(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_bf16mf4x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei8_v_bf16mf2x7(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_bf16mf2x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei8_v_bf16m1x7(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_bf16m1x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei8_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_bf16mf4x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei8_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_bf16mf2x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei8_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8_v_bf16m1x7_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg8ei32.c
new file mode 100644
index 0000000000000..c2af8bdb067dc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg8ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei32_v_bf16mf4x8(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei32_v_bf16mf4x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei32_v_bf16mf2x8(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei32_v_bf16mf2x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei32_v_bf16m1x8(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei32_v_bf16m1x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei32_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x8_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg8ei32_v_bf16mf4x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei32_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei32_v_bf16mf2x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei32_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei32_v_bf16m1x8_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg8ei64.c
new file mode 100644
index 0000000000000..ec8bc85ac4f86
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg8ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei64_v_bf16mf4x8(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_bf16mf4x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei64_v_bf16mf2x8(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_bf16mf2x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei64_v_bf16m1x8(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_bf16m1x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei64_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_bf16mf4x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei64_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_bf16mf2x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei64_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei64_v_bf16m1x8_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg8ei8.c
new file mode 100644
index 0000000000000..5ecd7ff291b8c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsoxseg8ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei8_v_bf16mf4x8(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_bf16mf4x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei8_v_bf16mf2x8(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_bf16mf2x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei8_v_bf16m1x8(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_bf16m1x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei8_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_bf16mf4x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei8_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_bf16mf2x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei8_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei8_v_bf16m1x8_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxei32.c
new file mode 100644
index 0000000000000..af8509247cf26
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxei32.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16mf4(__bf16 *rs1, vuint32mf2_t rs2, vbfloat16mf4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei32_v_bf16mf4(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16mf2(__bf16 *rs1, vuint32m1_t rs2, vbfloat16mf2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei32_v_bf16mf2(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16m1(__bf16 *rs1, vuint32m2_t rs2, vbfloat16m1_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei32_v_bf16m1(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16m2(__bf16 *rs1, vuint32m4_t rs2, vbfloat16m2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei32_v_bf16m2(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16m4(__bf16 *rs1, vuint32m8_t rs2, vbfloat16m4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei32_v_bf16m4(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint32mf2_t rs2,
+ vbfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsuxei32_v_bf16mf4_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t rs2,
+ vbfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei32_v_bf16mf2_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t rs2,
+ vbfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsuxei32_v_bf16m1_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t rs2,
+ vbfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsuxei32_v_bf16m2_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint32m8_t rs2,
+ vbfloat16m4_t vs3, size_t vl) {
+ return __riscv_vsuxei32_v_bf16m4_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxei64.c
new file mode 100644
index 0000000000000..d230cd6b4b757
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16mf4(__bf16 *rs1, vuint64m1_t rs2, vbfloat16mf4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei64_v_bf16mf4(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16mf2(__bf16 *rs1, vuint64m2_t rs2, vbfloat16mf2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei64_v_bf16mf2(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16m1(__bf16 *rs1, vuint64m4_t rs2, vbfloat16m1_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei64_v_bf16m1(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16m2(__bf16 *rs1, vuint64m8_t rs2, vbfloat16m2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei64_v_bf16m2(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t rs2,
+ vbfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsuxei64_v_bf16mf4_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t rs2,
+ vbfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei64_v_bf16mf2_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t rs2,
+ vbfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsuxei64_v_bf16m1_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t rs2,
+ vbfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsuxei64_v_bf16m2_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxei8.c
new file mode 100644
index 0000000000000..6c91f5feb6d16
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxei8.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16mf4(__bf16 *rs1, vuint8mf8_t rs2, vbfloat16mf4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei8_v_bf16mf4(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16mf2(__bf16 *rs1, vuint8mf4_t rs2, vbfloat16mf2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei8_v_bf16mf2(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m1(__bf16 *rs1, vuint8mf2_t rs2, vbfloat16m1_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei8_v_bf16m1(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m2(__bf16 *rs1, vuint8m1_t rs2, vbfloat16m2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei8_v_bf16m2(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m4(__bf16 *rs1, vuint8m2_t rs2, vbfloat16m4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei8_v_bf16m4(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], <vscale x 32 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m8(__bf16 *rs1, vuint8m4_t rs2, vbfloat16m8_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei8_v_bf16m8(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t rs2,
+ vbfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsuxei8_v_bf16mf4_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t rs2,
+ vbfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei8_v_bf16mf2_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t rs2,
+ vbfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsuxei8_v_bf16m1_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t rs2,
+ vbfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsuxei8_v_bf16m2_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint8m2_t rs2,
+ vbfloat16m4_t vs3, size_t vl) {
+ return __riscv_vsuxei8_v_bf16m4_m(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], <vscale x 32 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint8m4_t rs2,
+ vbfloat16m8_t vs3, size_t vl) {
+ return __riscv_vsuxei8_v_bf16m8_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg2ei32.c
new file mode 100644
index 0000000000000..95e346cdd4c63
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg2ei32.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16mf4x2(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_bf16mf4x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16mf2x2(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_bf16mf2x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16m1x2(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_bf16m1x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16m2x2(__bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_bf16m2x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16m4x2(__bf16 *rs1, vuint32m8_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_bf16m4x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg2ei32_v_bf16mf4x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_bf16mf2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_bf16m1x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_bf16m2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint32m8_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32_v_bf16m4x2_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg2ei64.c
new file mode 100644
index 0000000000000..9001cb1e23da0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg2ei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16mf4x2(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_bf16mf4x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16mf2x2(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_bf16mf2x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16m1x2(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_bf16m1x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16m2x2(__bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_bf16m2x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_bf16mf4x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_bf16mf2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_bf16m1x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64_v_bf16m2x2_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg2ei8.c
new file mode 100644
index 0000000000000..3c5490439282c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg2ei8.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16mf4x2(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8_v_bf16mf4x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16mf2x2(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8_v_bf16mf2x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16m1x2(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8_v_bf16m1x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16m2x2(__bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8_v_bf16m2x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16m4x2(__bf16 *rs1, vuint8m2_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8_v_bf16m4x2(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8_v_bf16mf4x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8_v_bf16mf2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8_v_bf16m1x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8_v_bf16m2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint8m2_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8_v_bf16m4x2_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg3ei32.c
new file mode 100644
index 0000000000000..8fd5dd337ea5b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg3ei32.c
@@ -0,0 +1,97 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16mf4x3(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei32_v_bf16mf4x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16mf2x3(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei32_v_bf16mf2x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16m1x3(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei32_v_bf16m1x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16m2x3(__bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei32_v_bf16m2x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x3_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg3ei32_v_bf16mf4x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei32_v_bf16mf2x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei32_v_bf16m1x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei32_v_bf16m2x3_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg3ei64.c
new file mode 100644
index 0000000000000..6ebaff9a8deef
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg3ei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16mf4x3(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64_v_bf16mf4x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16mf2x3(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64_v_bf16mf2x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16m1x3(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64_v_bf16m1x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16m2x3(__bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64_v_bf16m2x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64_v_bf16mf4x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64_v_bf16mf2x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64_v_bf16m1x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64_v_bf16m2x3_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg3ei8.c
new file mode 100644
index 0000000000000..79ffd53eaaa68
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg3ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16mf4x3(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8_v_bf16mf4x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16mf2x3(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8_v_bf16mf2x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16m1x3(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8_v_bf16m1x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16m2x3(__bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8_v_bf16m2x3(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8_v_bf16mf4x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8_v_bf16mf2x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8_v_bf16m1x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8_v_bf16m2x3_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg4ei32.c
new file mode 100644
index 0000000000000..e19555ab1e0d6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg4ei32.c
@@ -0,0 +1,97 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16mf4x4(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_bf16mf4x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16mf2x4(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_bf16mf2x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16m1x4(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_bf16m1x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16m2x4(__bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_bf16m2x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg4ei32_v_bf16mf4x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_bf16mf2x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_bf16m1x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei32_v_bf16m2x4_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg4ei64.c
new file mode 100644
index 0000000000000..6f89441c69faf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg4ei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16mf4x4(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64_v_bf16mf4x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16mf2x4(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64_v_bf16mf2x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16m1x4(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64_v_bf16m1x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16m2x4(__bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64_v_bf16m2x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64_v_bf16mf4x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64_v_bf16mf2x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64_v_bf16m1x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64_v_bf16m2x4_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg4ei8.c
new file mode 100644
index 0000000000000..1a4d4523b2233
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg4ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16mf4x4(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8_v_bf16mf4x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16mf2x4(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8_v_bf16mf2x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16m1x4(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8_v_bf16m1x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16m2x4(__bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8_v_bf16m2x4(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8_v_bf16mf4x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8_v_bf16mf2x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8_v_bf16m1x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8_v_bf16m2x4_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg5ei32.c
new file mode 100644
index 0000000000000..3a96d09995da9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg5ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_bf16mf4x5(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_bf16mf4x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_bf16mf2x5(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_bf16mf2x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_bf16m1x5(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_bf16m1x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x5_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg5ei32_v_bf16mf4x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_bf16mf2x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei32_v_bf16m1x5_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg5ei64.c
new file mode 100644
index 0000000000000..2d34ab4e7a36e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg5ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_bf16mf4x5(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei64_v_bf16mf4x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_bf16mf2x5(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei64_v_bf16mf2x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_bf16m1x5(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei64_v_bf16m1x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei64_v_bf16mf4x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei64_v_bf16mf2x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei64_v_bf16m1x5_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg5ei8.c
new file mode 100644
index 0000000000000..f6f7b4ae6b96e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg5ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei8_v_bf16mf4x5(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei8_v_bf16mf4x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei8_v_bf16mf2x5(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei8_v_bf16mf2x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei8_v_bf16m1x5(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei8_v_bf16m1x5(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei8_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei8_v_bf16mf4x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei8_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei8_v_bf16mf2x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei8_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei8_v_bf16m1x5_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg6ei32.c
new file mode 100644
index 0000000000000..7db63cb97d512
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg6ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_bf16mf4x6(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_bf16mf4x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_bf16mf2x6(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_bf16mf2x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_bf16m1x6(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_bf16m1x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x6_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg6ei32_v_bf16mf4x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_bf16mf2x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32_v_bf16m1x6_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg6ei64.c
new file mode 100644
index 0000000000000..dd6c263688e7e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg6ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_bf16mf4x6(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64_v_bf16mf4x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_bf16mf2x6(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64_v_bf16mf2x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_bf16m1x6(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64_v_bf16m1x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64_v_bf16mf4x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64_v_bf16mf2x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64_v_bf16m1x6_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg6ei8.c
new file mode 100644
index 0000000000000..157eba825dc2d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg6ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei8_v_bf16mf4x6(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8_v_bf16mf4x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei8_v_bf16mf2x6(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8_v_bf16mf2x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei8_v_bf16m1x6(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8_v_bf16m1x6(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei8_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8_v_bf16mf4x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei8_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8_v_bf16mf2x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei8_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8_v_bf16m1x6_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg7ei32.c
new file mode 100644
index 0000000000000..8e48179147053
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg7ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_bf16mf4x7(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei32_v_bf16mf4x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_bf16mf2x7(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei32_v_bf16mf2x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_bf16m1x7(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei32_v_bf16m1x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x7_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg7ei32_v_bf16mf4x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei32_v_bf16mf2x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei32_v_bf16m1x7_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg7ei64.c
new file mode 100644
index 0000000000000..6c9a0443f425d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg7ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_bf16mf4x7(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei64_v_bf16mf4x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_bf16mf2x7(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei64_v_bf16mf2x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_bf16m1x7(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei64_v_bf16m1x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei64_v_bf16mf4x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei64_v_bf16mf2x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei64_v_bf16m1x7_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg7ei8.c
new file mode 100644
index 0000000000000..27ced38cf8407
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg7ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei8_v_bf16mf4x7(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei8_v_bf16mf4x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei8_v_bf16mf2x7(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei8_v_bf16mf2x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei8_v_bf16m1x7(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei8_v_bf16m1x7(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei8_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei8_v_bf16mf4x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei8_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei8_v_bf16mf2x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei8_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei8_v_bf16m1x7_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg8ei32.c
new file mode 100644
index 0000000000000..81adc3cb6ba5e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg8ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_bf16mf4x8(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei32_v_bf16mf4x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_bf16mf2x8(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei32_v_bf16mf2x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_bf16m1x8(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei32_v_bf16m1x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x8_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg8ei32_v_bf16mf4x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei32_v_bf16mf2x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei32_v_bf16m1x8_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg8ei64.c
new file mode 100644
index 0000000000000..43d76fc2dfd74
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg8ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_bf16mf4x8(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei64_v_bf16mf4x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_bf16mf2x8(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei64_v_bf16mf2x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_bf16m1x8(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei64_v_bf16m1x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei64_v_bf16mf4x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei64_v_bf16mf2x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei64_v_bf16m1x8_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg8ei8.c
new file mode 100644
index 0000000000000..3976ba816dbeb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/non-overloaded/vsuxseg8ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei8_v_bf16mf4x8(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_bf16mf4x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei8_v_bf16mf2x8(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_bf16mf2x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei8_v_bf16m1x8(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_bf16m1x8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei8_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_bf16mf4x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei8_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_bf16mf2x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei8_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei8_v_bf16m1x8_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxei32.c
new file mode 100644
index 0000000000000..dbf7941678b33
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxei32.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei32_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei32_v_bf16mf4(const __bf16 *rs1, vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei32_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei32_v_bf16mf2(const __bf16 *rs1, vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei32_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei32_v_bf16m1(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei32_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei32_v_bf16m2(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei32_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei32_v_bf16m4(const __bf16 *rs1, vuint32m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei32_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei32_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei32_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei32_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei32_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei32_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei32_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei32_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei32_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei32_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxei64.c
new file mode 100644
index 0000000000000..9a35316065afd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei64_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei64_v_bf16mf4(const __bf16 *rs1, vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei64_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei64_v_bf16mf2(const __bf16 *rs1, vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei64_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei64_v_bf16m1(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei64_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei64_v_bf16m2(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei64_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei64_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei64_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei64_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei64_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei64_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei64_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei64_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxei8.c
new file mode 100644
index 0000000000000..e3a6c813660b3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxei8.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei8_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei8_v_bf16mf4(const __bf16 *rs1, vuint8mf8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei8_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei8_v_bf16mf2(const __bf16 *rs1, vuint8mf4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei8_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei8_v_bf16m1(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei8_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei8_v_bf16m2(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei8_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei8_v_bf16m4(const __bf16 *rs1, vuint8m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vloxei8_v_bf16m8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vloxei.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> poison, ptr [[RS1]], <vscale x 32 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vloxei8_v_bf16m8(const __bf16 *rs1, vuint8m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei8_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei8_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei8_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei8_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei8_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei8_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei8_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei8_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei8_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei8_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vloxei8_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vloxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> poison, ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vloxei8_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1,
+ vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg2ei32.c
new file mode 100644
index 0000000000000..7853a29bcfb11
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg2ei32.c
@@ -0,0 +1,120 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei32_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei32_v_bf16mf4x2(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei32_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei32_v_bf16mf2x2(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei32_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei32_v_bf16m1x2(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei32_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei32_v_bf16m2x2(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei32_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei32_v_bf16m4x2(const __bf16 *rs1, vuint32m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei32_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei32_v_bf16mf4x2_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei32_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei32_v_bf16mf2x2_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei32_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei32_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei32_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei32_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei32_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei32_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg2ei64.c
new file mode 100644
index 0000000000000..0a68ccc33290c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg2ei64.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei64_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei64_v_bf16mf4x2(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei64_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei64_v_bf16mf2x2(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei64_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei64_v_bf16m1x2(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei64_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei64_v_bf16m2x2(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei64_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei64_v_bf16mf4x2_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei64_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei64_v_bf16mf2x2_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei64_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei64_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei64_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei64_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg2ei8.c
new file mode 100644
index 0000000000000..05b59f3b3e259
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg2ei8.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei8_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei8_v_bf16mf4x2(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei8_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei8_v_bf16mf2x2(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei8_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei8_v_bf16m1x2(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei8_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei8_v_bf16m2x2(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei8_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei8_v_bf16m4x2(const __bf16 *rs1, vuint8m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei8_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei8_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei8_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei8_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei8_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei8_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei8_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei8_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei8_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei8_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg3ei32.c
new file mode 100644
index 0000000000000..683e30c9a6692
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg3ei32.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei32_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei32_v_bf16mf4x3(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei32_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei32_v_bf16mf2x3(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei32_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei32_v_bf16m1x3(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei32_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei32_v_bf16m2x3(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei32_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei32_v_bf16mf4x3_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei32_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei32_v_bf16mf2x3_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei32_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei32_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei32_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei32_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg3ei64.c
new file mode 100644
index 0000000000000..69b2809272eba
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg3ei64.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei64_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei64_v_bf16mf4x3(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei64_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei64_v_bf16mf2x3(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei64_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei64_v_bf16m1x3(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei64_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei64_v_bf16m2x3(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei64_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei64_v_bf16mf4x3_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei64_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei64_v_bf16mf2x3_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei64_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei64_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei64_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei64_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg3ei8.c
new file mode 100644
index 0000000000000..c6f00fe76ba26
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg3ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei8_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei8_v_bf16mf4x3(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei8_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei8_v_bf16mf2x3(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei8_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei8_v_bf16m1x3(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei8_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei8_v_bf16m2x3(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei8_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei8_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei8_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei8_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei8_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei8_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei8_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei8_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg4ei32.c
new file mode 100644
index 0000000000000..55a980a6f67d9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg4ei32.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei32_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei32_v_bf16mf4x4(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei32_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei32_v_bf16mf2x4(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei32_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei32_v_bf16m1x4(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei32_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei32_v_bf16m2x4(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei32_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei32_v_bf16mf4x4_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei32_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei32_v_bf16mf2x4_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei32_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei32_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei32_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei32_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg4ei64.c
new file mode 100644
index 0000000000000..75a0200476176
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg4ei64.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei64_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei64_v_bf16mf4x4(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei64_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei64_v_bf16mf2x4(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei64_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei64_v_bf16m1x4(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei64_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei64_v_bf16m2x4(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei64_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei64_v_bf16mf4x4_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei64_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei64_v_bf16mf2x4_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei64_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei64_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei64_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei64_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg4ei8.c
new file mode 100644
index 0000000000000..f0c75373fc92c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg4ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei8_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei8_v_bf16mf4x4(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei8_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei8_v_bf16mf2x4(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei8_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei8_v_bf16m1x4(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei8_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei8_v_bf16m2x4(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei8_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei8_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei8_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei8_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei8_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei8_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei8_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei8_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg5ei32.c
new file mode 100644
index 0000000000000..132e27cd557c4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg5ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei32_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei32_v_bf16mf4x5(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei32_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei32_v_bf16mf2x5(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei32_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei32_v_bf16m1x5(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg5ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei32_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei32_v_bf16mf4x5_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei32_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei32_v_bf16mf2x5_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei32_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei32_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg5ei64.c
new file mode 100644
index 0000000000000..ce1cfeddba1ca
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg5ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei64_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei64_v_bf16mf4x5(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei64_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei64_v_bf16mf2x5(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei64_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei64_v_bf16m1x5(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg5ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei64_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei64_v_bf16mf4x5_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei64_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei64_v_bf16mf2x5_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei64_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei64_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg5ei8.c
new file mode 100644
index 0000000000000..55b835f3a5421
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg5ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei8_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei8_v_bf16mf4x5(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei8_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei8_v_bf16mf2x5(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei8_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei8_v_bf16m1x5(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg5ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei8_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei8_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei8_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei8_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei8_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei8_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg6ei32.c
new file mode 100644
index 0000000000000..c8df8d0c6b907
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg6ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei32_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei32_v_bf16mf4x6(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei32_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei32_v_bf16mf2x6(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei32_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei32_v_bf16m1x6(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg6ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei32_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei32_v_bf16mf4x6_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei32_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei32_v_bf16mf2x6_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei32_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei32_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg6ei64.c
new file mode 100644
index 0000000000000..217b89ce831d1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg6ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei64_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei64_v_bf16mf4x6(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei64_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei64_v_bf16mf2x6(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei64_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei64_v_bf16m1x6(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg6ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei64_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei64_v_bf16mf4x6_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei64_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei64_v_bf16mf2x6_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei64_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei64_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg6ei8.c
new file mode 100644
index 0000000000000..4684ebad695a1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg6ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei8_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei8_v_bf16mf4x6(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei8_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei8_v_bf16mf2x6(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei8_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei8_v_bf16m1x6(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg6ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei8_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei8_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei8_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei8_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei8_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei8_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg7ei32.c
new file mode 100644
index 0000000000000..d9e2e9b41fed2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg7ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei32_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei32_v_bf16mf4x7(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei32_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei32_v_bf16mf2x7(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei32_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei32_v_bf16m1x7(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg7ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei32_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei32_v_bf16mf4x7_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei32_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei32_v_bf16mf2x7_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei32_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei32_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg7ei64.c
new file mode 100644
index 0000000000000..461eebb092c54
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg7ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei64_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei64_v_bf16mf4x7(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei64_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei64_v_bf16mf2x7(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei64_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei64_v_bf16m1x7(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg7ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei64_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei64_v_bf16mf4x7_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei64_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei64_v_bf16mf2x7_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei64_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei64_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg7ei8.c
new file mode 100644
index 0000000000000..71da2628c9fd6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg7ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei8_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei8_v_bf16mf4x7(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei8_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei8_v_bf16mf2x7(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei8_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei8_v_bf16m1x7(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg7ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei8_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei8_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei8_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei8_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei8_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei8_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg8ei32.c
new file mode 100644
index 0000000000000..7848efaecaec1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg8ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei32_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei32_v_bf16mf4x8(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei32_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei32_v_bf16mf2x8(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei32_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei32_v_bf16m1x8(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg8ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei32_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei32_v_bf16mf4x8_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei32_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei32_v_bf16mf2x8_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei32_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei32_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg8ei64.c
new file mode 100644
index 0000000000000..4e3e388d260b8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg8ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei64_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei64_v_bf16mf4x8(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei64_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei64_v_bf16mf2x8(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei64_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei64_v_bf16m1x8(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg8ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei64_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei64_v_bf16mf4x8_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei64_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei64_v_bf16mf2x8_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei64_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei64_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg8ei8.c
new file mode 100644
index 0000000000000..399a03afa0c0e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vloxseg8ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei8_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei8_v_bf16mf4x8(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei8_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei8_v_bf16mf2x8(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei8_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei8_v_bf16m1x8(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg8ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei8_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei8_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei8_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei8_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei8_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei8_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxei32.c
new file mode 100644
index 0000000000000..20bcca1a3fd38
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxei32.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei32_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei32_v_bf16mf4(const __bf16 *rs1, vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei32_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei32_v_bf16mf2(const __bf16 *rs1, vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei32_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei32_v_bf16m1(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei32_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei32_v_bf16m2(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei32_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei32_v_bf16m4(const __bf16 *rs1, vuint32m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei32_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei32_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei32_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei32_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei32_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei32_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei32_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei32_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei32_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei32_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxei64.c
new file mode 100644
index 0000000000000..62012fd029307
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei64_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei64_v_bf16mf4(const __bf16 *rs1, vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei64_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei64_v_bf16mf2(const __bf16 *rs1, vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei64_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei64_v_bf16m1(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei64_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei64_v_bf16m2(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei64_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei64_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei64_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei64_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei64_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei64_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei64_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei64_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxei8.c
new file mode 100644
index 0000000000000..c4bf30d7319c5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxei8.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei8_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei8_v_bf16mf4(const __bf16 *rs1, vuint8mf8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei8_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei8_v_bf16mf2(const __bf16 *rs1, vuint8mf4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei8_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei8_v_bf16m1(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei8_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei8_v_bf16m2(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei8_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei8_v_bf16m4(const __bf16 *rs1, vuint8m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vluxei8_v_bf16m8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vluxei.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> poison, ptr [[RS1]], <vscale x 32 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vluxei8_v_bf16m8(const __bf16 *rs1, vuint8m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei8_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei8_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei8_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei8_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei8_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei8_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei8_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei8_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei8_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei8_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vluxei8_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vluxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> poison, ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vluxei8_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1,
+ vuint8m4_t rs2, size_t vl) {
+ return __riscv_vluxei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg2ei32.c
new file mode 100644
index 0000000000000..85934361676dc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg2ei32.c
@@ -0,0 +1,120 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei32_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei32_v_bf16mf4x2(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei32_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei32_v_bf16mf2x2(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei32_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei32_v_bf16m1x2(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei32_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei32_v_bf16m2x2(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei32_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei32_v_bf16m4x2(const __bf16 *rs1, vuint32m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei32_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei32_v_bf16mf4x2_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei32_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei32_v_bf16mf2x2_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei32_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei32_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei32_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei32_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei32_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei32_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg2ei64.c
new file mode 100644
index 0000000000000..7def329db7be3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg2ei64.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei64_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei64_v_bf16mf4x2(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei64_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei64_v_bf16mf2x2(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei64_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei64_v_bf16m1x2(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei64_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei64_v_bf16m2x2(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei64_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei64_v_bf16mf4x2_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei64_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei64_v_bf16mf2x2_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei64_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei64_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei64_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei64_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg2ei8.c
new file mode 100644
index 0000000000000..bc6f1ffab972c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg2ei8.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei8_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei8_v_bf16mf4x2(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei8_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei8_v_bf16mf2x2(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei8_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei8_v_bf16m1x2(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei8_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei8_v_bf16m2x2(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei8_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei8_v_bf16m4x2(const __bf16 *rs1, vuint8m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei8_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei8_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei8_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei8_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei8_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei8_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei8_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei8_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei8_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) poison, ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei8_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg3ei32.c
new file mode 100644
index 0000000000000..8a700b75b4299
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg3ei32.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei32_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei32_v_bf16mf4x3(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei32_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei32_v_bf16mf2x3(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei32_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei32_v_bf16m1x3(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei32_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei32_v_bf16m2x3(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei32_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei32_v_bf16mf4x3_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei32_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei32_v_bf16mf2x3_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei32_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei32_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei32_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei32_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg3ei64.c
new file mode 100644
index 0000000000000..d1d8943310e0b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg3ei64.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei64_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei64_v_bf16mf4x3(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei64_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei64_v_bf16mf2x3(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei64_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei64_v_bf16m1x3(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei64_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei64_v_bf16m2x3(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei64_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei64_v_bf16mf4x3_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei64_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei64_v_bf16mf2x3_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei64_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei64_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei64_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei64_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg3ei8.c
new file mode 100644
index 0000000000000..4952e0230a8ea
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg3ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei8_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei8_v_bf16mf4x3(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei8_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei8_v_bf16mf2x3(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei8_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei8_v_bf16m1x3(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei8_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei8_v_bf16m2x3(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei8_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei8_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei8_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei8_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei8_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei8_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei8_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei8_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg4ei32.c
new file mode 100644
index 0000000000000..d853b86e3dda8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg4ei32.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei32_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei32_v_bf16mf4x4(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei32_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei32_v_bf16mf2x4(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei32_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei32_v_bf16m1x4(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei32_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei32_v_bf16m2x4(const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei32_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei32_v_bf16mf4x4_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei32_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei32_v_bf16mf2x4_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei32_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei32_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei32_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei32_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg4ei64.c
new file mode 100644
index 0000000000000..97dbdd705fc9f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg4ei64.c
@@ -0,0 +1,98 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei64_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei64_v_bf16mf4x4(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei64_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei64_v_bf16mf2x4(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei64_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei64_v_bf16m1x4(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei64_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei64_v_bf16m2x4(const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei64_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei64_v_bf16mf4x4_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei64_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei64_v_bf16mf2x4_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei64_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei64_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei64_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei64_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg4ei8.c
new file mode 100644
index 0000000000000..db435cf18daa4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg4ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei8_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei8_v_bf16mf4x4(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei8_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei8_v_bf16mf2x4(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei8_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei8_v_bf16m1x4(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei8_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei8_v_bf16m2x4(const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei8_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei8_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei8_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei8_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei8_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei8_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei8_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) poison, ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei8_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg5ei32.c
new file mode 100644
index 0000000000000..3719e841581d1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg5ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei32_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei32_v_bf16mf4x5(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei32_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei32_v_bf16mf2x5(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei32_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei32_v_bf16m1x5(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg5ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei32_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei32_v_bf16mf4x5_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei32_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei32_v_bf16mf2x5_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei32_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei32_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg5ei64.c
new file mode 100644
index 0000000000000..66743a528fd52
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg5ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei64_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei64_v_bf16mf4x5(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei64_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei64_v_bf16mf2x5(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei64_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei64_v_bf16m1x5(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg5ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei64_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei64_v_bf16mf4x5_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei64_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei64_v_bf16mf2x5_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei64_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei64_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg5ei8.c
new file mode 100644
index 0000000000000..f97f5b7597170
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg5ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei8_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei8_v_bf16mf4x5(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei8_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei8_v_bf16mf2x5(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei8_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei8_v_bf16m1x5(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg5ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei8_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei8_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei8_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei8_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei8_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei8_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg6ei32.c
new file mode 100644
index 0000000000000..6d760e51245c1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg6ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei32_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei32_v_bf16mf4x6(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei32_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei32_v_bf16mf2x6(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei32_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei32_v_bf16m1x6(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg6ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei32_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei32_v_bf16mf4x6_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei32_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei32_v_bf16mf2x6_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei32_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei32_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg6ei64.c
new file mode 100644
index 0000000000000..3b2ffacfcda62
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg6ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei64_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei64_v_bf16mf4x6(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei64_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei64_v_bf16mf2x6(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei64_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei64_v_bf16m1x6(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg6ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei64_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei64_v_bf16mf4x6_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei64_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei64_v_bf16mf2x6_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei64_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei64_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg6ei8.c
new file mode 100644
index 0000000000000..1fef58ed84851
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg6ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei8_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei8_v_bf16mf4x6(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei8_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei8_v_bf16mf2x6(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei8_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei8_v_bf16m1x6(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg6ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei8_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei8_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei8_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei8_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei8_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei8_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg7ei32.c
new file mode 100644
index 0000000000000..14d7ca9f34800
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg7ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei32_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei32_v_bf16mf4x7(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei32_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei32_v_bf16mf2x7(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei32_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei32_v_bf16m1x7(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg7ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei32_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei32_v_bf16mf4x7_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei32_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei32_v_bf16mf2x7_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei32_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei32_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg7ei64.c
new file mode 100644
index 0000000000000..50d06a0f4b856
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg7ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei64_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei64_v_bf16mf4x7(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei64_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei64_v_bf16mf2x7(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei64_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei64_v_bf16m1x7(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg7ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei64_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei64_v_bf16mf4x7_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei64_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei64_v_bf16mf2x7_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei64_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei64_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg7ei8.c
new file mode 100644
index 0000000000000..cdd05faeeded6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg7ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei8_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei8_v_bf16mf4x7(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei8_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei8_v_bf16mf2x7(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei8_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei8_v_bf16m1x7(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg7ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei8_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei8_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei8_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei8_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei8_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei8_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg8ei32.c
new file mode 100644
index 0000000000000..8929f3348ba59
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg8ei32.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei32_v_bf16mf4x8(const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei32_v_bf16mf2x8(const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei32_v_bf16m1x8(const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg8ei32(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei32_v_bf16mf4x8_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei32_v_bf16mf2x8_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei32_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg8ei64.c
new file mode 100644
index 0000000000000..ece3490dfcc37
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg8ei64.c
@@ -0,0 +1,76 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei64_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei64_v_bf16mf4x8(const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei64_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei64_v_bf16mf2x8(const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei64_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei64_v_bf16m1x8(const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg8ei64(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei64_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei64_v_bf16mf4x8_m(vbool64_t vm,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei64_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei64_v_bf16mf2x8_m(vbool32_t vm,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei64_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei64_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg8ei8.c
new file mode 100644
index 0000000000000..9ee1ce5f28820
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vluxseg8ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei8_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei8_v_bf16mf4x8(const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei8_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei8_v_bf16mf2x8(const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei8_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei8_v_bf16m1x8(const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg8ei8(rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei8_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) poison, ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei8_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei8_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) poison, ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei8_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8(vm, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei8_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei8_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8(vm, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxei32.c
new file mode 100644
index 0000000000000..db440e9a49157
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxei32.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16mf4(__bf16 *rs1, vuint32mf2_t rs2, vbfloat16mf4_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei32(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16mf2(__bf16 *rs1, vuint32m1_t rs2, vbfloat16mf2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei32(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16m1(__bf16 *rs1, vuint32m2_t rs2, vbfloat16m1_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei32(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16m2(__bf16 *rs1, vuint32m4_t rs2, vbfloat16m2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei32(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16m4(__bf16 *rs1, vuint32m8_t rs2, vbfloat16m4_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei32(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint32mf2_t rs2,
+ vbfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t rs2,
+ vbfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t rs2,
+ vbfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t rs2,
+ vbfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei32_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei32_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint32m8_t rs2,
+ vbfloat16m4_t vs3, size_t vl) {
+ return __riscv_vsoxei32(vm, rs1, rs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxei64.c
new file mode 100644
index 0000000000000..a9c28dd6af613
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16mf4(__bf16 *rs1, vuint64m1_t rs2, vbfloat16mf4_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei64(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16mf2(__bf16 *rs1, vuint64m2_t rs2, vbfloat16mf2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei64(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16m1(__bf16 *rs1, vuint64m4_t rs2, vbfloat16m1_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei64(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16m2(__bf16 *rs1, vuint64m8_t rs2, vbfloat16m2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei64(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t rs2,
+ vbfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t rs2,
+ vbfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t rs2,
+ vbfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei64_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei64_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t rs2,
+ vbfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsoxei64(vm, rs1, rs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxei8.c
new file mode 100644
index 0000000000000..7106538c741d3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxei8.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16mf4(__bf16 *rs1, vuint8mf8_t rs2, vbfloat16mf4_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei8(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16mf2(__bf16 *rs1, vuint8mf4_t rs2, vbfloat16mf2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei8(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m1(__bf16 *rs1, vuint8mf2_t rs2, vbfloat16m1_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei8(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m2(__bf16 *rs1, vuint8m1_t rs2, vbfloat16m2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei8(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m4(__bf16 *rs1, vuint8m2_t rs2, vbfloat16m4_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei8(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], <vscale x 32 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m8(__bf16 *rs1, vuint8m4_t rs2, vbfloat16m8_t vs3,
+ size_t vl) {
+ return __riscv_vsoxei8(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t rs2,
+ vbfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t rs2,
+ vbfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t rs2,
+ vbfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t rs2,
+ vbfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint8m2_t rs2,
+ vbfloat16m4_t vs3, size_t vl) {
+ return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxei8_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], <vscale x 32 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxei8_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint8m4_t rs2,
+ vbfloat16m8_t vs3, size_t vl) {
+ return __riscv_vsoxei8(vm, rs1, rs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg2ei32.c
new file mode 100644
index 0000000000000..cc3954a0eecf2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg2ei32.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16mf4x2(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16mf2x2(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16m1x2(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16m2x2(__bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16m4x2(__bf16 *rs1, vuint32m8_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei32_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei32_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint32m8_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei32(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg2ei64.c
new file mode 100644
index 0000000000000..5b716beb149b7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg2ei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16mf4x2(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16mf2x2(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16m1x2(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16m2x2(__bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei64_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei64_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei64(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg2ei8.c
new file mode 100644
index 0000000000000..c346040ae6da3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg2ei8.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16mf4x2(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16mf2x2(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16m1x2(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16m2x2(__bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16m4x2(__bf16 *rs1, vuint8m2_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg2ei8_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg2ei8_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint8m2_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei8(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg3ei32.c
new file mode 100644
index 0000000000000..c283b82c2f9f9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg3ei32.c
@@ -0,0 +1,97 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16mf4x3(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16mf2x3(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16m1x3(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16m2x3(__bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x3_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei32_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei32_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei32(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg3ei64.c
new file mode 100644
index 0000000000000..3a7dd53a4999b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg3ei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16mf4x3(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16mf2x3(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16m1x3(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16m2x3(__bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei64_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei64_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei64(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg3ei8.c
new file mode 100644
index 0000000000000..5ae57d256056e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg3ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16mf4x3(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16mf2x3(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16m1x3(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16m2x3(__bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg3ei8_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg3ei8_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei8(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg4ei32.c
new file mode 100644
index 0000000000000..23fe189930964
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg4ei32.c
@@ -0,0 +1,97 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16mf4x4(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16mf2x4(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16m1x4(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16m2x4(__bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x4_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei32_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei32_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei32(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg4ei64.c
new file mode 100644
index 0000000000000..5411c6af8ae7a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg4ei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16mf4x4(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16mf2x4(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16m1x4(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16m2x4(__bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei64_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei64_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei64(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg4ei8.c
new file mode 100644
index 0000000000000..783029bc125a6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg4ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16mf4x4(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16mf2x4(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16m1x4(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16m2x4(__bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg4ei8_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg4ei8_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei8(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg5ei32.c
new file mode 100644
index 0000000000000..9f18e9c1a965c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg5ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei32_v_bf16mf4x5(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei32_v_bf16mf2x5(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei32_v_bf16m1x5(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei32_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x5_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei32_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei32_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei32_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei32(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg5ei64.c
new file mode 100644
index 0000000000000..8ea144f8d9882
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg5ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei64_v_bf16mf4x5(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei64_v_bf16mf2x5(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei64_v_bf16m1x5(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei64_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei64_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei64_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei64_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei64(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg5ei8.c
new file mode 100644
index 0000000000000..d61d35ec7ef22
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg5ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei8_v_bf16mf4x5(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei8_v_bf16mf2x5(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei8_v_bf16m1x5(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei8_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei8_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg5ei8_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg5ei8_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei8(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg6ei32.c
new file mode 100644
index 0000000000000..4050a61ec1d66
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg6ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei32_v_bf16mf4x6(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei32_v_bf16mf2x6(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei32_v_bf16m1x6(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei32_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x6_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei32_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei32_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei32_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei32(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg6ei64.c
new file mode 100644
index 0000000000000..90fc1a66802fb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg6ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei64_v_bf16mf4x6(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei64_v_bf16mf2x6(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei64_v_bf16m1x6(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei64_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei64_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei64_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei64_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei64(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg6ei8.c
new file mode 100644
index 0000000000000..7c9b7fd261ef2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg6ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei8_v_bf16mf4x6(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei8_v_bf16mf2x6(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei8_v_bf16m1x6(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei8_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei8_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg6ei8_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg6ei8_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei8(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg7ei32.c
new file mode 100644
index 0000000000000..2fa45968720e7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg7ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei32_v_bf16mf4x7(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei32_v_bf16mf2x7(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei32_v_bf16m1x7(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei32_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x7_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei32_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei32_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei32_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei32(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg7ei64.c
new file mode 100644
index 0000000000000..26462c20ab1a1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg7ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei64_v_bf16mf4x7(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei64_v_bf16mf2x7(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei64_v_bf16m1x7(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei64_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei64_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei64_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei64_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei64(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg7ei8.c
new file mode 100644
index 0000000000000..b3709517fd352
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg7ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei8_v_bf16mf4x7(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei8_v_bf16mf2x7(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei8_v_bf16m1x7(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei8_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei8_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg7ei8_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg7ei8_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei8(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg8ei32.c
new file mode 100644
index 0000000000000..412fdb9e37643
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg8ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei32_v_bf16mf4x8(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei32_v_bf16mf2x8(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei32_v_bf16m1x8(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei32_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x8_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei32_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei32_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei32_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei32(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg8ei64.c
new file mode 100644
index 0000000000000..7f34e54b0a1fb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg8ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei64_v_bf16mf4x8(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei64_v_bf16mf2x8(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei64_v_bf16m1x8(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei64_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei64_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei64_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei64_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei64(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg8ei8.c
new file mode 100644
index 0000000000000..088ca08fca7f4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsoxseg8ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei8_v_bf16mf4x8(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei8_v_bf16mf2x8(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei8_v_bf16m1x8(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei8_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei8_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsoxseg8ei8_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsoxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsoxseg8ei8_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei8(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxei32.c
new file mode 100644
index 0000000000000..13a2d299c8f73
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxei32.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16mf4(__bf16 *rs1, vuint32mf2_t rs2, vbfloat16mf4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei32(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16mf2(__bf16 *rs1, vuint32m1_t rs2, vbfloat16mf2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei32(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16m1(__bf16 *rs1, vuint32m2_t rs2, vbfloat16m1_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei32(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16m2(__bf16 *rs1, vuint32m4_t rs2, vbfloat16m2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei32(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16m4(__bf16 *rs1, vuint32m8_t rs2, vbfloat16m4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei32(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint32mf2_t rs2,
+ vbfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t rs2,
+ vbfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t rs2,
+ vbfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t rs2,
+ vbfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei32_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei32_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint32m8_t rs2,
+ vbfloat16m4_t vs3, size_t vl) {
+ return __riscv_vsuxei32(vm, rs1, rs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxei64.c
new file mode 100644
index 0000000000000..a6c618a2fb760
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16mf4(__bf16 *rs1, vuint64m1_t rs2, vbfloat16mf4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16mf2(__bf16 *rs1, vuint64m2_t rs2, vbfloat16mf2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16m1(__bf16 *rs1, vuint64m4_t rs2, vbfloat16m1_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16m2(__bf16 *rs1, vuint64m8_t rs2, vbfloat16m2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei64(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t rs2,
+ vbfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t rs2,
+ vbfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t rs2,
+ vbfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei64_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei64_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t rs2,
+ vbfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsuxei64(vm, rs1, rs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxei8.c
new file mode 100644
index 0000000000000..67c4d4b952a3a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxei8.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16mf4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16mf4(__bf16 *rs1, vuint8mf8_t rs2, vbfloat16mf4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei8(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16mf2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16mf2(__bf16 *rs1, vuint8mf4_t rs2, vbfloat16mf2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei8(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m1(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m1(__bf16 *rs1, vuint8mf2_t rs2, vbfloat16m1_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei8(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m2(__bf16 *rs1, vuint8m1_t rs2, vbfloat16m2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei8(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m4(__bf16 *rs1, vuint8m2_t rs2, vbfloat16m4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei8(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], <vscale x 32 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m8(__bf16 *rs1, vuint8m4_t rs2, vbfloat16m8_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei8(rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], <vscale x 1 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t rs2,
+ vbfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], <vscale x 2 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t rs2,
+ vbfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], <vscale x 4 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t rs2,
+ vbfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], <vscale x 8 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t rs2,
+ vbfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], <vscale x 16 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint8m2_t rs2,
+ vbfloat16m4_t vs3, size_t vl) {
+ return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxei8_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], <vscale x 32 x bfloat> [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VS3]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxei8_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint8m4_t rs2,
+ vbfloat16m8_t vs3, size_t vl) {
+ return __riscv_vsuxei8(vm, rs1, rs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg2ei32.c
new file mode 100644
index 0000000000000..9a3b30c9eff1a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg2ei32.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16mf4x2(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16mf2x2(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16m1x2(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16m2x2(__bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16m4x2(__bf16 *rs1, vuint32m8_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei32_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei32_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint32m8_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei32(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg2ei64.c
new file mode 100644
index 0000000000000..b2c445730d2fb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg2ei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16mf4x2(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16mf2x2(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16m1x2(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16m2x2(__bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei64_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei64_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei64(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg2ei8.c
new file mode 100644
index 0000000000000..6b9774c560521
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg2ei8.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16mf4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16mf4x2(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16mf2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16mf2x2(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16m1x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16m1x2(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16m2x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16m2x2(__bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16m4x2(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16m4x2(__bf16 *rs1, vuint8m2_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16mf4x2_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16mf2x2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16m1x2_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16m2x2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg2ei8_v_bf16m4x2_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VS3]], ptr [[RS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg2ei8_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint8m2_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei8(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg3ei32.c
new file mode 100644
index 0000000000000..402045661c33f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg3ei32.c
@@ -0,0 +1,97 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16mf4x3(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16mf2x3(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16m1x3(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16m2x3(__bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x3_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei32_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei32_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei32(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg3ei64.c
new file mode 100644
index 0000000000000..f1ebe0b61966a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg3ei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16mf4x3(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16mf2x3(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16m1x3(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16m2x3(__bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei64_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei64_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei64(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg3ei8.c
new file mode 100644
index 0000000000000..d0c36534ca869
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg3ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16mf4x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16mf4x3(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16mf2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16mf2x3(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16m1x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16m1x3(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16m2x3(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16m2x3(__bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16mf4x3_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16mf2x3_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16m1x3_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg3ei8_v_bf16m2x3_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg3ei8_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei8(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg4ei32.c
new file mode 100644
index 0000000000000..6b1c29dae50ad
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg4ei32.c
@@ -0,0 +1,97 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16mf4x4(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16mf2x4(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16m1x4(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16m2x4(__bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei32_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei32_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint32m4_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei32(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg4ei64.c
new file mode 100644
index 0000000000000..8c3c04ee76a50
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg4ei64.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16mf4x4(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16mf2x4(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16m1x4(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16m2x4(__bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei64_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei64_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint64m8_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei64(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg4ei8.c
new file mode 100644
index 0000000000000..b052914453463
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg4ei8.c
@@ -0,0 +1,96 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16mf4x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16mf4x4(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16mf2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16mf2x4(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16m1x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16m1x4(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16m2x4(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16m2x4(__bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16mf4x4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16mf2x4_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16m1x4_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg4ei8_v_bf16m2x4_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VS3]], ptr [[RS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg4ei8_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint8m1_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei8(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg5ei32.c
new file mode 100644
index 0000000000000..8e2b11b0bda6f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg5ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_bf16mf4x5(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_bf16mf2x5(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_bf16m1x5(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x5_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei32_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei32_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei32(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg5ei64.c
new file mode 100644
index 0000000000000..e3f24c791744e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg5ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_bf16mf4x5(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_bf16mf2x5(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_bf16m1x5(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei64_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei64_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei64(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg5ei8.c
new file mode 100644
index 0000000000000..1130456ee6cfd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg5ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_bf16mf4x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei8_v_bf16mf4x5(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_bf16mf2x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei8_v_bf16mf2x5(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_bf16m1x5(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei8_v_bf16m1x5(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_bf16mf4x5_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei8_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_bf16mf2x5_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei8_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg5ei8_v_bf16m1x5_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg5ei8_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei8(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg6ei32.c
new file mode 100644
index 0000000000000..dc619aa60aef0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg6ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_bf16mf4x6(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_bf16mf2x6(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_bf16m1x6(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x6_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei32_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei32_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei32(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg6ei64.c
new file mode 100644
index 0000000000000..35a776bcda289
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg6ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_bf16mf4x6(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_bf16mf2x6(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_bf16m1x6(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei64_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei64_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei64(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg6ei8.c
new file mode 100644
index 0000000000000..d474a5682b17c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg6ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_bf16mf4x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei8_v_bf16mf4x6(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_bf16mf2x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei8_v_bf16mf2x6(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_bf16m1x6(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei8_v_bf16m1x6(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_bf16mf4x6_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei8_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_bf16mf2x6_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei8_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg6ei8_v_bf16m1x6_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg6ei8_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei8(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg7ei32.c
new file mode 100644
index 0000000000000..d2e1859fa300f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg7ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_bf16mf4x7(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_bf16mf2x7(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_bf16m1x7(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x7_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei32_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei32_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei32(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg7ei64.c
new file mode 100644
index 0000000000000..cfc1cfca208c1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg7ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_bf16mf4x7(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_bf16mf2x7(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_bf16m1x7(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei64_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei64_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei64(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg7ei8.c
new file mode 100644
index 0000000000000..a887bf12fd5bf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg7ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_bf16mf4x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei8_v_bf16mf4x7(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_bf16mf2x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei8_v_bf16mf2x7(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_bf16m1x7(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei8_v_bf16m1x7(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_bf16mf4x7_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei8_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_bf16mf2x7_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei8_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg7ei8_v_bf16m1x7_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg7ei8_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei8(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg8ei32.c
new file mode 100644
index 0000000000000..38f3f8a3a96e2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg8ei32.c
@@ -0,0 +1,75 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_bf16mf4x8(__bf16 *rs1, vuint32mf2_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_bf16mf2x8(__bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_bf16m1x8(__bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei32(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1,
+ vuint32mf2_t vs2, vbfloat16mf4x8_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, vuint32m1_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei32_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei32_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint32m2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei32(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg8ei64.c
new file mode 100644
index 0000000000000..4adeaf94608eb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg8ei64.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_bf16mf4x8(__bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_bf16mf2x8(__bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_bf16m1x8(__bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei64(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, vuint64m1_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, vuint64m2_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei64_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei64_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint64m4_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei64(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg8ei8.c
new file mode 100644
index 0000000000000..25cbcf6887063
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/non-policy/overloaded/vsuxseg8ei8.c
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_bf16mf4x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei8_v_bf16mf4x8(__bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_bf16mf2x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei8_v_bf16mf2x8(__bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_bf16m1x8(
+// CHECK-RV64-SAME: ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei8_v_bf16m1x8(__bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei8(rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_bf16mf4x8_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei8_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, vuint8mf8_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_bf16mf2x8_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei8_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, vuint8mf4_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local void @test_vsuxseg8ei8_v_bf16m1x8_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VS3]], ptr [[RS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret void
+//
+void test_vsuxseg8ei8_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint8mf2_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei8(vm, rs1, vs2, vs3, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxei32.c
new file mode 100644
index 0000000000000..ec107fa023afa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxei32.c
@@ -0,0 +1,208 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei32_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei32_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16mf4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei32_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei32_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16mf2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei32_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei32_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m1_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei32_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei32_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei32_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei32_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei32_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei32_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16mf4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei32_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei32_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16mf2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei32_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei32_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m1_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei32_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei32_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei32_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei32_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei32_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei32_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16mf4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei32_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei32_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16mf2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei32_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei32_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m1_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei32_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei32_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei32_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei32_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei32_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei32_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16mf4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei32_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei32_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16mf2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei32_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei32_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m1_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei32_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei32_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei32_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei32_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32_v_bf16m4_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxei64.c
new file mode 100644
index 0000000000000..22081708baf15
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxei64.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei64_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei64_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16mf4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei64_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei64_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16mf2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei64_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei64_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16m1_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei64_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei64_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16m2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei64_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei64_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16mf4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei64_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei64_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16mf2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei64_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei64_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16m1_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei64_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei64_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16m2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei64_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei64_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16mf4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei64_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei64_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16mf2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei64_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei64_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16m1_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei64_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei64_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16m2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei64_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei64_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16mf4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei64_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei64_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16mf2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei64_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei64_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16m1_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei64_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei64_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_v_bf16m2_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxei8.c
new file mode 100644
index 0000000000000..b0b97875ac3ba
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxei8.c
@@ -0,0 +1,248 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei8_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei8_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16mf4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei8_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei8_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16mf2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei8_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei8_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m1_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei8_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei8_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei8_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei8_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vloxei8_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vloxei.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vloxei8_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei8_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei8_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16mf4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei8_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei8_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16mf2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei8_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei8_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m1_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei8_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei8_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei8_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei8_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vloxei8_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vloxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vloxei8_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, const __bf16 *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei8_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei8_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16mf4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei8_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei8_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16mf2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei8_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei8_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m1_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei8_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei8_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei8_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei8_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vloxei8_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vloxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vloxei8_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, const __bf16 *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei8_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei8_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16mf4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei8_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei8_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16mf2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei8_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei8_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m1_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei8_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei8_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei8_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei8_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vloxei8_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vloxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vloxei8_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, const __bf16 *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_v_bf16m8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg2ei32.c
new file mode 100644
index 0000000000000..df494cfe1b233
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg2ei32.c
@@ -0,0 +1,208 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei32_v_bf16mf4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei32_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16mf4x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei32_v_bf16mf2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei32_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16mf2x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei32_v_bf16m1x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei32_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m1x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei32_v_bf16m2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei32_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m2x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei32_v_bf16m4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei32_v_bf16m4x2_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m4x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei32_v_bf16mf4x2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei32_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16mf4x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei32_v_bf16mf2x2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei32_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16mf2x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei32_v_bf16m1x2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei32_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m1x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei32_v_bf16m2x2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei32_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m2x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei32_v_bf16m4x2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei32_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m4x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei32_v_bf16mf4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei32_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16mf4x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei32_v_bf16mf2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei32_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16mf2x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei32_v_bf16m1x2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei32_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m1x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei32_v_bf16m2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei32_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m2x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei32_v_bf16m4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei32_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m4x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei32_v_bf16mf4x2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei32_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16mf4x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei32_v_bf16mf2x2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei32_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16mf2x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei32_v_bf16m1x2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei32_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m1x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei32_v_bf16m2x2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei32_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m2x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei32_v_bf16m4x2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei32_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_v_bf16m4x2_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg2ei64.c
new file mode 100644
index 0000000000000..1d7d48a3305c9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg2ei64.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei64_v_bf16mf4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei64_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16mf4x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei64_v_bf16mf2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei64_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16mf2x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei64_v_bf16m1x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei64_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16m1x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei64_v_bf16m2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei64_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16m2x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei64_v_bf16mf4x2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei64_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16mf4x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei64_v_bf16mf2x2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei64_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16mf2x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei64_v_bf16m1x2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei64_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16m1x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei64_v_bf16m2x2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei64_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16m2x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei64_v_bf16mf4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei64_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16mf4x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei64_v_bf16mf2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei64_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16mf2x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei64_v_bf16m1x2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei64_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16m1x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei64_v_bf16m2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei64_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16m2x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei64_v_bf16mf4x2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei64_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16mf4x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei64_v_bf16mf2x2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei64_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16mf2x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei64_v_bf16m1x2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei64_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16m1x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei64_v_bf16m2x2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei64_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_v_bf16m2x2_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg2ei8.c
new file mode 100644
index 0000000000000..44070dd3888c7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg2ei8.c
@@ -0,0 +1,208 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei8_v_bf16mf4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei8_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16mf4x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei8_v_bf16mf2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei8_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16mf2x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei8_v_bf16m1x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei8_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m1x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei8_v_bf16m2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei8_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m2x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei8_v_bf16m4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei8_v_bf16m4x2_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m4x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei8_v_bf16mf4x2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei8_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16mf4x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei8_v_bf16mf2x2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei8_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16mf2x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei8_v_bf16m1x2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei8_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m1x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei8_v_bf16m2x2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei8_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m2x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei8_v_bf16m4x2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei8_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m4x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei8_v_bf16mf4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei8_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16mf4x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei8_v_bf16mf2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei8_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16mf2x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei8_v_bf16m1x2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei8_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m1x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei8_v_bf16m2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei8_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m2x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei8_v_bf16m4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei8_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m4x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei8_v_bf16mf4x2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei8_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16mf4x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei8_v_bf16mf2x2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei8_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16mf2x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei8_v_bf16m1x2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei8_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m1x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei8_v_bf16m2x2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei8_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m2x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei8_v_bf16m4x2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei8_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_v_bf16m4x2_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg3ei32.c
new file mode 100644
index 0000000000000..86b6692126e89
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg3ei32.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei32_v_bf16mf4x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei32_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16mf4x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei32_v_bf16mf2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei32_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16mf2x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei32_v_bf16m1x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei32_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16m1x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei32_v_bf16m2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei32_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16m2x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei32_v_bf16mf4x3_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei32_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16mf4x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei32_v_bf16mf2x3_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei32_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16mf2x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei32_v_bf16m1x3_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei32_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16m1x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei32_v_bf16m2x3_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei32_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16m2x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei32_v_bf16mf4x3_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei32_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16mf4x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei32_v_bf16mf2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei32_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16mf2x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei32_v_bf16m1x3_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei32_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16m1x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei32_v_bf16m2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei32_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16m2x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei32_v_bf16mf4x3_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei32_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16mf4x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei32_v_bf16mf2x3_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei32_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16mf2x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei32_v_bf16m1x3_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei32_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16m1x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei32_v_bf16m2x3_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei32_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_v_bf16m2x3_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg3ei64.c
new file mode 100644
index 0000000000000..960df2840fadd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg3ei64.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei64_v_bf16mf4x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei64_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16mf4x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei64_v_bf16mf2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei64_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16mf2x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei64_v_bf16m1x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei64_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16m1x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei64_v_bf16m2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei64_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16m2x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei64_v_bf16mf4x3_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei64_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16mf4x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei64_v_bf16mf2x3_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei64_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16mf2x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei64_v_bf16m1x3_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei64_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16m1x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei64_v_bf16m2x3_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei64_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16m2x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei64_v_bf16mf4x3_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei64_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16mf4x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei64_v_bf16mf2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei64_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16mf2x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei64_v_bf16m1x3_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei64_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16m1x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei64_v_bf16m2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei64_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16m2x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei64_v_bf16mf4x3_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei64_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16mf4x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei64_v_bf16mf2x3_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei64_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16mf2x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei64_v_bf16m1x3_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei64_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16m1x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei64_v_bf16m2x3_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei64_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_v_bf16m2x3_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg3ei8.c
new file mode 100644
index 0000000000000..00d0958458b0f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg3ei8.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei8_v_bf16mf4x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei8_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16mf4x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei8_v_bf16mf2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei8_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16mf2x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei8_v_bf16m1x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei8_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16m1x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei8_v_bf16m2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei8_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16m2x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei8_v_bf16mf4x3_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei8_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16mf4x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei8_v_bf16mf2x3_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei8_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16mf2x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei8_v_bf16m1x3_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei8_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16m1x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei8_v_bf16m2x3_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei8_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16m2x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei8_v_bf16mf4x3_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei8_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16mf4x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei8_v_bf16mf2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei8_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16mf2x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei8_v_bf16m1x3_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei8_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16m1x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei8_v_bf16m2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei8_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16m2x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei8_v_bf16mf4x3_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei8_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16mf4x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei8_v_bf16mf2x3_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei8_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16mf2x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei8_v_bf16m1x3_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei8_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16m1x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei8_v_bf16m2x3_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei8_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_v_bf16m2x3_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg4ei32.c
new file mode 100644
index 0000000000000..0ad8e794bc4bc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg4ei32.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei32_v_bf16mf4x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei32_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16mf4x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei32_v_bf16mf2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei32_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16mf2x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei32_v_bf16m1x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei32_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16m1x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei32_v_bf16m2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei32_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16m2x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei32_v_bf16mf4x4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei32_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16mf4x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei32_v_bf16mf2x4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei32_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16mf2x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei32_v_bf16m1x4_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei32_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16m1x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei32_v_bf16m2x4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei32_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16m2x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei32_v_bf16mf4x4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei32_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16mf4x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei32_v_bf16mf2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei32_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16mf2x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei32_v_bf16m1x4_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei32_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16m1x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei32_v_bf16m2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei32_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16m2x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei32_v_bf16mf4x4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei32_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16mf4x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei32_v_bf16mf2x4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei32_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16mf2x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei32_v_bf16m1x4_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei32_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16m1x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei32_v_bf16m2x4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei32_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_v_bf16m2x4_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg4ei64.c
new file mode 100644
index 0000000000000..337031e119f7d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg4ei64.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei64_v_bf16mf4x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei64_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16mf4x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei64_v_bf16mf2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei64_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16mf2x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei64_v_bf16m1x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei64_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16m1x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei64_v_bf16m2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei64_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16m2x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei64_v_bf16mf4x4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei64_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16mf4x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei64_v_bf16mf2x4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei64_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16mf2x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei64_v_bf16m1x4_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei64_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16m1x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei64_v_bf16m2x4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei64_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16m2x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei64_v_bf16mf4x4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei64_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16mf4x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei64_v_bf16mf2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei64_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16mf2x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei64_v_bf16m1x4_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei64_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16m1x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei64_v_bf16m2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei64_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16m2x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei64_v_bf16mf4x4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei64_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16mf4x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei64_v_bf16mf2x4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei64_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16mf2x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei64_v_bf16m1x4_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei64_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16m1x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei64_v_bf16m2x4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei64_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_v_bf16m2x4_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg4ei8.c
new file mode 100644
index 0000000000000..f2d9383676af1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg4ei8.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei8_v_bf16mf4x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei8_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16mf4x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei8_v_bf16mf2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei8_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16mf2x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei8_v_bf16m1x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei8_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16m1x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei8_v_bf16m2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei8_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16m2x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei8_v_bf16mf4x4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei8_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16mf4x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei8_v_bf16mf2x4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei8_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16mf2x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei8_v_bf16m1x4_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei8_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16m1x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei8_v_bf16m2x4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei8_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16m2x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei8_v_bf16mf4x4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei8_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16mf4x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei8_v_bf16mf2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei8_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16mf2x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei8_v_bf16m1x4_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei8_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16m1x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei8_v_bf16m2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei8_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16m2x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei8_v_bf16mf4x4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei8_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16mf4x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei8_v_bf16mf2x4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei8_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16mf2x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei8_v_bf16m1x4_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei8_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16m1x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei8_v_bf16m2x4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei8_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_v_bf16m2x4_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg5ei32.c
new file mode 100644
index 0000000000000..8a43698cc7b8d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg5ei32.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei32_v_bf16mf4x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei32_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16mf4x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei32_v_bf16mf2x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei32_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16mf2x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei32_v_bf16m1x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei32_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16m1x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei32_v_bf16mf4x5_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei32_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16mf4x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei32_v_bf16mf2x5_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei32_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16mf2x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei32_v_bf16m1x5_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei32_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16m1x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei32_v_bf16mf4x5_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei32_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16mf4x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei32_v_bf16mf2x5_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei32_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16mf2x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei32_v_bf16m1x5_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei32_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16m1x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei32_v_bf16mf4x5_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei32_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16mf4x5_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei32_v_bf16mf2x5_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei32_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16mf2x5_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei32_v_bf16m1x5_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei32_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_v_bf16m1x5_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg5ei64.c
new file mode 100644
index 0000000000000..90bd04ecaf510
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg5ei64.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei64_v_bf16mf4x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei64_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16mf4x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei64_v_bf16mf2x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei64_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16mf2x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei64_v_bf16m1x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei64_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16m1x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei64_v_bf16mf4x5_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei64_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16mf4x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei64_v_bf16mf2x5_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei64_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16mf2x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei64_v_bf16m1x5_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei64_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16m1x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei64_v_bf16mf4x5_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei64_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16mf4x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei64_v_bf16mf2x5_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei64_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16mf2x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei64_v_bf16m1x5_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei64_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16m1x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei64_v_bf16mf4x5_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei64_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16mf4x5_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei64_v_bf16mf2x5_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei64_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16mf2x5_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei64_v_bf16m1x5_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei64_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_v_bf16m1x5_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg5ei8.c
new file mode 100644
index 0000000000000..bd25294cbade4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg5ei8.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei8_v_bf16mf4x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei8_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16mf4x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei8_v_bf16mf2x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei8_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16mf2x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei8_v_bf16m1x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei8_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16m1x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei8_v_bf16mf4x5_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei8_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16mf4x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei8_v_bf16mf2x5_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei8_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16mf2x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei8_v_bf16m1x5_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei8_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16m1x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei8_v_bf16mf4x5_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei8_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16mf4x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei8_v_bf16mf2x5_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei8_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16mf2x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei8_v_bf16m1x5_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei8_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16m1x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei8_v_bf16mf4x5_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei8_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16mf4x5_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei8_v_bf16mf2x5_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei8_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16mf2x5_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei8_v_bf16m1x5_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei8_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_v_bf16m1x5_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg6ei32.c
new file mode 100644
index 0000000000000..017317d5d681a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg6ei32.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei32_v_bf16mf4x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei32_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16mf4x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei32_v_bf16mf2x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei32_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16mf2x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei32_v_bf16m1x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei32_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16m1x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei32_v_bf16mf4x6_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei32_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16mf4x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei32_v_bf16mf2x6_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei32_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16mf2x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei32_v_bf16m1x6_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei32_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16m1x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei32_v_bf16mf4x6_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei32_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16mf4x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei32_v_bf16mf2x6_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei32_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16mf2x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei32_v_bf16m1x6_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei32_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16m1x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei32_v_bf16mf4x6_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei32_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16mf4x6_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei32_v_bf16mf2x6_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei32_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16mf2x6_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei32_v_bf16m1x6_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei32_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_v_bf16m1x6_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg6ei64.c
new file mode 100644
index 0000000000000..b835ec1bbcbf6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg6ei64.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei64_v_bf16mf4x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei64_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16mf4x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei64_v_bf16mf2x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei64_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16mf2x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei64_v_bf16m1x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei64_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16m1x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei64_v_bf16mf4x6_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei64_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16mf4x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei64_v_bf16mf2x6_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei64_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16mf2x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei64_v_bf16m1x6_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei64_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16m1x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei64_v_bf16mf4x6_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei64_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16mf4x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei64_v_bf16mf2x6_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei64_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16mf2x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei64_v_bf16m1x6_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei64_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16m1x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei64_v_bf16mf4x6_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei64_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16mf4x6_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei64_v_bf16mf2x6_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei64_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16mf2x6_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei64_v_bf16m1x6_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei64_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_v_bf16m1x6_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg6ei8.c
new file mode 100644
index 0000000000000..7f12dd202bed6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg6ei8.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei8_v_bf16mf4x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei8_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16mf4x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei8_v_bf16mf2x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei8_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16mf2x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei8_v_bf16m1x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei8_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16m1x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei8_v_bf16mf4x6_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei8_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16mf4x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei8_v_bf16mf2x6_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei8_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16mf2x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei8_v_bf16m1x6_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei8_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16m1x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei8_v_bf16mf4x6_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei8_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16mf4x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei8_v_bf16mf2x6_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei8_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16mf2x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei8_v_bf16m1x6_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei8_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16m1x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei8_v_bf16mf4x6_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei8_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16mf4x6_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei8_v_bf16mf2x6_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei8_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16mf2x6_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei8_v_bf16m1x6_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei8_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_v_bf16m1x6_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg7ei32.c
new file mode 100644
index 0000000000000..6478fcf7ab914
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg7ei32.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei32_v_bf16mf4x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei32_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16mf4x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei32_v_bf16mf2x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei32_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16mf2x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei32_v_bf16m1x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei32_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16m1x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei32_v_bf16mf4x7_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei32_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16mf4x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei32_v_bf16mf2x7_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei32_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16mf2x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei32_v_bf16m1x7_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei32_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16m1x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei32_v_bf16mf4x7_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei32_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16mf4x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei32_v_bf16mf2x7_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei32_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16mf2x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei32_v_bf16m1x7_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei32_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16m1x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei32_v_bf16mf4x7_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei32_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16mf4x7_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei32_v_bf16mf2x7_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei32_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16mf2x7_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei32_v_bf16m1x7_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei32_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_v_bf16m1x7_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg7ei64.c
new file mode 100644
index 0000000000000..986045a6b85b8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg7ei64.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei64_v_bf16mf4x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei64_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16mf4x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei64_v_bf16mf2x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei64_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16mf2x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei64_v_bf16m1x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei64_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16m1x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei64_v_bf16mf4x7_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei64_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16mf4x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei64_v_bf16mf2x7_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei64_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16mf2x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei64_v_bf16m1x7_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei64_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16m1x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei64_v_bf16mf4x7_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei64_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16mf4x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei64_v_bf16mf2x7_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei64_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16mf2x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei64_v_bf16m1x7_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei64_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16m1x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei64_v_bf16mf4x7_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei64_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16mf4x7_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei64_v_bf16mf2x7_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei64_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16mf2x7_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei64_v_bf16m1x7_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei64_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_v_bf16m1x7_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg7ei8.c
new file mode 100644
index 0000000000000..bfe80e670df0b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg7ei8.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei8_v_bf16mf4x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei8_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16mf4x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei8_v_bf16mf2x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei8_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16mf2x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei8_v_bf16m1x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei8_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16m1x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei8_v_bf16mf4x7_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei8_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16mf4x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei8_v_bf16mf2x7_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei8_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16mf2x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei8_v_bf16m1x7_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei8_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16m1x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei8_v_bf16mf4x7_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei8_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16mf4x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei8_v_bf16mf2x7_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei8_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16mf2x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei8_v_bf16m1x7_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei8_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16m1x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei8_v_bf16mf4x7_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei8_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16mf4x7_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei8_v_bf16mf2x7_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei8_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16mf2x7_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei8_v_bf16m1x7_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei8_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_v_bf16m1x7_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg8ei32.c
new file mode 100644
index 0000000000000..c5679ae3ca327
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg8ei32.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei32_v_bf16mf4x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei32_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16mf4x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei32_v_bf16mf2x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei32_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16mf2x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei32_v_bf16m1x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei32_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16m1x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei32_v_bf16mf4x8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei32_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16mf4x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei32_v_bf16mf2x8_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei32_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16mf2x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei32_v_bf16m1x8_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei32_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16m1x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei32_v_bf16mf4x8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei32_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16mf4x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei32_v_bf16mf2x8_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei32_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16mf2x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei32_v_bf16m1x8_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei32_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16m1x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei32_v_bf16mf4x8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei32_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16mf4x8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei32_v_bf16mf2x8_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei32_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16mf2x8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei32_v_bf16m1x8_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei32_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_v_bf16m1x8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg8ei64.c
new file mode 100644
index 0000000000000..3e87a24394fd0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg8ei64.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei64_v_bf16mf4x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei64_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16mf4x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei64_v_bf16mf2x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei64_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16mf2x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei64_v_bf16m1x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei64_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16m1x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei64_v_bf16mf4x8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei64_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16mf4x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei64_v_bf16mf2x8_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei64_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16mf2x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei64_v_bf16m1x8_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei64_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16m1x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei64_v_bf16mf4x8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei64_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16mf4x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei64_v_bf16mf2x8_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei64_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16mf2x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei64_v_bf16m1x8_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei64_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16m1x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei64_v_bf16mf4x8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei64_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16mf4x8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei64_v_bf16mf2x8_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei64_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16mf2x8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei64_v_bf16m1x8_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei64_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_v_bf16m1x8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg8ei8.c
new file mode 100644
index 0000000000000..b4c20694b3599
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vloxseg8ei8.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei8_v_bf16mf4x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei8_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16mf4x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei8_v_bf16mf2x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei8_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16mf2x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei8_v_bf16m1x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei8_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16m1x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei8_v_bf16mf4x8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei8_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16mf4x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei8_v_bf16mf2x8_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei8_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16mf2x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei8_v_bf16m1x8_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei8_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16m1x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei8_v_bf16mf4x8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei8_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16mf4x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei8_v_bf16mf2x8_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei8_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16mf2x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei8_v_bf16m1x8_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei8_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16m1x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei8_v_bf16mf4x8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei8_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16mf4x8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei8_v_bf16mf2x8_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei8_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16mf2x8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei8_v_bf16m1x8_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei8_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_v_bf16m1x8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxei32.c
new file mode 100644
index 0000000000000..85d344f63301b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxei32.c
@@ -0,0 +1,208 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei32_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei32_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16mf4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei32_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei32_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16mf2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei32_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei32_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m1_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei32_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei32_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei32_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei32_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei32_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei32_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16mf4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei32_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei32_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16mf2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei32_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei32_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m1_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei32_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei32_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei32_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei32_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei32_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei32_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16mf4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei32_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei32_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16mf2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei32_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei32_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m1_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei32_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei32_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei32_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei32_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei32_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei32_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16mf4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei32_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei32_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16mf2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei32_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei32_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m1_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei32_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei32_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei32_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei32_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxei32_v_bf16m4_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxei64.c
new file mode 100644
index 0000000000000..7d5914e241fad
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxei64.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei64_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei64_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16mf4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei64_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei64_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16mf2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei64_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei64_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16m1_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei64_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei64_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16m2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei64_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei64_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16mf4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei64_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei64_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16mf2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei64_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei64_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16m1_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei64_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei64_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16m2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei64_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei64_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16mf4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei64_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei64_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16mf2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei64_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei64_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16m1_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei64_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei64_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16m2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei64_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei64_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16mf4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei64_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei64_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16mf2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei64_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei64_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16m1_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei64_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei64_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxei64_v_bf16m2_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxei8.c
new file mode 100644
index 0000000000000..118843e2adf35
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxei8.c
@@ -0,0 +1,248 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei8_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei8_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16mf4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei8_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei8_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16mf2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei8_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei8_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m1_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei8_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei8_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei8_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei8_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vluxei8_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vluxei.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vluxei8_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei8_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei8_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16mf4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei8_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei8_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16mf2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei8_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei8_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m1_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei8_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei8_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei8_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei8_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vluxei8_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vluxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vluxei8_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, const __bf16 *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei8_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei8_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16mf4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei8_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei8_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16mf2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei8_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei8_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m1_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei8_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei8_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei8_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei8_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vluxei8_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vluxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vluxei8_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, const __bf16 *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei8_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei8_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16mf4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei8_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei8_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16mf2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei8_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei8_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m1_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei8_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei8_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei8_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei8_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vluxei8_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vluxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vluxei8_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, const __bf16 *rs1, vuint8m4_t rs2, size_t vl) {
+ return __riscv_vluxei8_v_bf16m8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg2ei32.c
new file mode 100644
index 0000000000000..3428217434ec2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg2ei32.c
@@ -0,0 +1,208 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei32_v_bf16mf4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei32_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16mf4x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei32_v_bf16mf2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei32_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16mf2x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei32_v_bf16m1x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei32_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m1x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei32_v_bf16m2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei32_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m2x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei32_v_bf16m4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei32_v_bf16m4x2_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m4x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei32_v_bf16mf4x2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei32_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16mf4x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei32_v_bf16mf2x2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei32_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16mf2x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei32_v_bf16m1x2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei32_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m1x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei32_v_bf16m2x2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei32_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m2x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei32_v_bf16m4x2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei32_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m4x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei32_v_bf16mf4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei32_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16mf4x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei32_v_bf16mf2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei32_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16mf2x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei32_v_bf16m1x2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei32_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m1x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei32_v_bf16m2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei32_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m2x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei32_v_bf16m4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei32_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m4x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei32_v_bf16mf4x2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei32_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16mf4x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei32_v_bf16mf2x2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei32_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16mf2x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei32_v_bf16m1x2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei32_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m1x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei32_v_bf16m2x2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei32_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m2x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei32_v_bf16m4x2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei32_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_v_bf16m4x2_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg2ei64.c
new file mode 100644
index 0000000000000..487ae96284022
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg2ei64.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei64_v_bf16mf4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei64_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16mf4x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei64_v_bf16mf2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei64_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16mf2x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei64_v_bf16m1x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei64_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16m1x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei64_v_bf16m2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei64_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16m2x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei64_v_bf16mf4x2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei64_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16mf4x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei64_v_bf16mf2x2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei64_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16mf2x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei64_v_bf16m1x2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei64_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16m1x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei64_v_bf16m2x2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei64_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16m2x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei64_v_bf16mf4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei64_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16mf4x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei64_v_bf16mf2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei64_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16mf2x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei64_v_bf16m1x2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei64_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16m1x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei64_v_bf16m2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei64_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16m2x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei64_v_bf16mf4x2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei64_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16mf4x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei64_v_bf16mf2x2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei64_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16mf2x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei64_v_bf16m1x2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei64_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16m1x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei64_v_bf16m2x2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei64_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_v_bf16m2x2_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg2ei8.c
new file mode 100644
index 0000000000000..d228b2b6db49d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg2ei8.c
@@ -0,0 +1,208 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei8_v_bf16mf4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei8_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16mf4x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei8_v_bf16mf2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei8_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16mf2x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei8_v_bf16m1x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei8_v_bf16m1x2_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m1x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei8_v_bf16m2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei8_v_bf16m2x2_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m2x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei8_v_bf16m4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei8_v_bf16m4x2_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m4x2_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei8_v_bf16mf4x2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei8_v_bf16mf4x2_tum(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16mf4x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei8_v_bf16mf2x2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei8_v_bf16mf2x2_tum(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16mf2x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei8_v_bf16m1x2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei8_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m1x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei8_v_bf16m2x2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei8_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m2x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei8_v_bf16m4x2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei8_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m4x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei8_v_bf16mf4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei8_v_bf16mf4x2_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16mf4x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei8_v_bf16mf2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei8_v_bf16mf2x2_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16mf2x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei8_v_bf16m1x2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei8_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m1x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei8_v_bf16m2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei8_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m2x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei8_v_bf16m4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei8_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m4x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei8_v_bf16mf4x2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei8_v_bf16mf4x2_mu(vbool64_t vm, vbfloat16mf4x2_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16mf4x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei8_v_bf16mf2x2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei8_v_bf16mf2x2_mu(vbool32_t vm, vbfloat16mf2x2_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16mf2x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei8_v_bf16m1x2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei8_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m1x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei8_v_bf16m2x2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei8_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m2x2_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei8_v_bf16m4x2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei8_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, const __bf16 *rs1, vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_v_bf16m4x2_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg3ei32.c
new file mode 100644
index 0000000000000..ff211e99a5f4e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg3ei32.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei32_v_bf16mf4x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei32_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16mf4x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei32_v_bf16mf2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei32_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16mf2x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei32_v_bf16m1x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei32_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16m1x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei32_v_bf16m2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei32_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16m2x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei32_v_bf16mf4x3_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei32_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16mf4x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei32_v_bf16mf2x3_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei32_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16mf2x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei32_v_bf16m1x3_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei32_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16m1x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei32_v_bf16m2x3_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei32_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16m2x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei32_v_bf16mf4x3_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei32_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16mf4x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei32_v_bf16mf2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei32_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16mf2x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei32_v_bf16m1x3_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei32_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16m1x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei32_v_bf16m2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei32_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16m2x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei32_v_bf16mf4x3_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei32_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16mf4x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei32_v_bf16mf2x3_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei32_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16mf2x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei32_v_bf16m1x3_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei32_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16m1x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei32_v_bf16m2x3_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei32_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_v_bf16m2x3_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg3ei64.c
new file mode 100644
index 0000000000000..ae79962b81b46
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg3ei64.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei64_v_bf16mf4x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei64_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16mf4x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei64_v_bf16mf2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei64_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16mf2x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei64_v_bf16m1x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei64_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16m1x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei64_v_bf16m2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei64_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16m2x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei64_v_bf16mf4x3_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei64_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16mf4x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei64_v_bf16mf2x3_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei64_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16mf2x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei64_v_bf16m1x3_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei64_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16m1x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei64_v_bf16m2x3_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei64_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16m2x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei64_v_bf16mf4x3_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei64_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16mf4x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei64_v_bf16mf2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei64_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16mf2x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei64_v_bf16m1x3_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei64_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16m1x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei64_v_bf16m2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei64_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16m2x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei64_v_bf16mf4x3_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei64_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16mf4x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei64_v_bf16mf2x3_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei64_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16mf2x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei64_v_bf16m1x3_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei64_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16m1x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei64_v_bf16m2x3_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei64_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_v_bf16m2x3_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg3ei8.c
new file mode 100644
index 0000000000000..18c7af2663099
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg3ei8.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei8_v_bf16mf4x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei8_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16mf4x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei8_v_bf16mf2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei8_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16mf2x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei8_v_bf16m1x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei8_v_bf16m1x3_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16m1x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei8_v_bf16m2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei8_v_bf16m2x3_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16m2x3_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei8_v_bf16mf4x3_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei8_v_bf16mf4x3_tum(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16mf4x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei8_v_bf16mf2x3_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei8_v_bf16mf2x3_tum(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16mf2x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei8_v_bf16m1x3_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei8_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16m1x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei8_v_bf16m2x3_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei8_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16m2x3_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei8_v_bf16mf4x3_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei8_v_bf16mf4x3_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16mf4x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei8_v_bf16mf2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei8_v_bf16mf2x3_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16mf2x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei8_v_bf16m1x3_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei8_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16m1x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei8_v_bf16m2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei8_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16m2x3_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei8_v_bf16mf4x3_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei8_v_bf16mf4x3_mu(vbool64_t vm, vbfloat16mf4x3_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16mf4x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei8_v_bf16mf2x3_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei8_v_bf16mf2x3_mu(vbool32_t vm, vbfloat16mf2x3_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16mf2x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei8_v_bf16m1x3_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei8_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16m1x3_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei8_v_bf16m2x3_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei8_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_v_bf16m2x3_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg4ei32.c
new file mode 100644
index 0000000000000..d0c9adf52942c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg4ei32.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei32_v_bf16mf4x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei32_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16mf4x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei32_v_bf16mf2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei32_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16mf2x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei32_v_bf16m1x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei32_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16m1x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei32_v_bf16m2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei32_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16m2x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei32_v_bf16mf4x4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei32_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16mf4x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei32_v_bf16mf2x4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei32_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16mf2x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei32_v_bf16m1x4_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei32_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16m1x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei32_v_bf16m2x4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei32_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16m2x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei32_v_bf16mf4x4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei32_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16mf4x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei32_v_bf16mf2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei32_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16mf2x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei32_v_bf16m1x4_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei32_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16m1x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei32_v_bf16m2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei32_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16m2x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei32_v_bf16mf4x4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei32_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16mf4x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei32_v_bf16mf2x4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei32_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16mf2x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei32_v_bf16m1x4_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei32_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16m1x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei32_v_bf16m2x4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei32_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_v_bf16m2x4_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg4ei64.c
new file mode 100644
index 0000000000000..b68db5fefe5e8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg4ei64.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei64_v_bf16mf4x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei64_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16mf4x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei64_v_bf16mf2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei64_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16mf2x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei64_v_bf16m1x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei64_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16m1x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei64_v_bf16m2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei64_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16m2x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei64_v_bf16mf4x4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei64_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16mf4x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei64_v_bf16mf2x4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei64_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16mf2x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei64_v_bf16m1x4_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei64_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16m1x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei64_v_bf16m2x4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei64_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16m2x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei64_v_bf16mf4x4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei64_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16mf4x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei64_v_bf16mf2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei64_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16mf2x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei64_v_bf16m1x4_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei64_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16m1x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei64_v_bf16m2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei64_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16m2x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei64_v_bf16mf4x4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei64_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16mf4x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei64_v_bf16mf2x4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei64_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16mf2x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei64_v_bf16m1x4_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei64_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16m1x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei64_v_bf16m2x4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei64_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_v_bf16m2x4_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg4ei8.c
new file mode 100644
index 0000000000000..1cca5289ab74b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg4ei8.c
@@ -0,0 +1,168 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei8_v_bf16mf4x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei8_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16mf4x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei8_v_bf16mf2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei8_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16mf2x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei8_v_bf16m1x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei8_v_bf16m1x4_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16m1x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei8_v_bf16m2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei8_v_bf16m2x4_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16m2x4_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei8_v_bf16mf4x4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei8_v_bf16mf4x4_tum(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16mf4x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei8_v_bf16mf2x4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei8_v_bf16mf2x4_tum(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16mf2x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei8_v_bf16m1x4_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei8_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16m1x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei8_v_bf16m2x4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei8_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16m2x4_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei8_v_bf16mf4x4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei8_v_bf16mf4x4_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16mf4x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei8_v_bf16mf2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei8_v_bf16mf2x4_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16mf2x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei8_v_bf16m1x4_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei8_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16m1x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei8_v_bf16m2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei8_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16m2x4_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei8_v_bf16mf4x4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei8_v_bf16mf4x4_mu(vbool64_t vm, vbfloat16mf4x4_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16mf4x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei8_v_bf16mf2x4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei8_v_bf16mf2x4_mu(vbool32_t vm, vbfloat16mf2x4_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16mf2x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei8_v_bf16m1x4_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei8_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16m1x4_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei8_v_bf16m2x4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei8_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, const __bf16 *rs1, vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_v_bf16m2x4_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg5ei32.c
new file mode 100644
index 0000000000000..3e1d4e325c4a8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg5ei32.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei32_v_bf16mf4x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei32_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16mf4x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei32_v_bf16mf2x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei32_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16mf2x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei32_v_bf16m1x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei32_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16m1x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei32_v_bf16mf4x5_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei32_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16mf4x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei32_v_bf16mf2x5_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei32_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16mf2x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei32_v_bf16m1x5_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei32_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16m1x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei32_v_bf16mf4x5_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei32_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16mf4x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei32_v_bf16mf2x5_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei32_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16mf2x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei32_v_bf16m1x5_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei32_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16m1x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei32_v_bf16mf4x5_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei32_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16mf4x5_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei32_v_bf16mf2x5_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei32_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16mf2x5_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei32_v_bf16m1x5_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei32_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_v_bf16m1x5_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg5ei64.c
new file mode 100644
index 0000000000000..cc8c4dfc2b057
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg5ei64.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei64_v_bf16mf4x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei64_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16mf4x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei64_v_bf16mf2x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei64_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16mf2x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei64_v_bf16m1x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei64_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16m1x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei64_v_bf16mf4x5_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei64_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16mf4x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei64_v_bf16mf2x5_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei64_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16mf2x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei64_v_bf16m1x5_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei64_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16m1x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei64_v_bf16mf4x5_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei64_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16mf4x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei64_v_bf16mf2x5_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei64_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16mf2x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei64_v_bf16m1x5_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei64_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16m1x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei64_v_bf16mf4x5_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei64_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16mf4x5_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei64_v_bf16mf2x5_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei64_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16mf2x5_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei64_v_bf16m1x5_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei64_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_v_bf16m1x5_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg5ei8.c
new file mode 100644
index 0000000000000..779368d55e95a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg5ei8.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei8_v_bf16mf4x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei8_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16mf4x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei8_v_bf16mf2x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei8_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16mf2x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei8_v_bf16m1x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei8_v_bf16m1x5_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16m1x5_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei8_v_bf16mf4x5_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei8_v_bf16mf4x5_tum(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16mf4x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei8_v_bf16mf2x5_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei8_v_bf16mf2x5_tum(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16mf2x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei8_v_bf16m1x5_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei8_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16m1x5_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei8_v_bf16mf4x5_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei8_v_bf16mf4x5_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16mf4x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei8_v_bf16mf2x5_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei8_v_bf16mf2x5_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16mf2x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei8_v_bf16m1x5_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei8_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16m1x5_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei8_v_bf16mf4x5_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei8_v_bf16mf4x5_mu(vbool64_t vm, vbfloat16mf4x5_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16mf4x5_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei8_v_bf16mf2x5_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei8_v_bf16mf2x5_mu(vbool32_t vm, vbfloat16mf2x5_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16mf2x5_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei8_v_bf16m1x5_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei8_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_v_bf16m1x5_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg6ei32.c
new file mode 100644
index 0000000000000..dec5b0af5eab1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg6ei32.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei32_v_bf16mf4x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei32_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16mf4x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei32_v_bf16mf2x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei32_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16mf2x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei32_v_bf16m1x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei32_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16m1x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei32_v_bf16mf4x6_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei32_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16mf4x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei32_v_bf16mf2x6_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei32_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16mf2x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei32_v_bf16m1x6_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei32_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16m1x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei32_v_bf16mf4x6_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei32_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16mf4x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei32_v_bf16mf2x6_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei32_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16mf2x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei32_v_bf16m1x6_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei32_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16m1x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei32_v_bf16mf4x6_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei32_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16mf4x6_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei32_v_bf16mf2x6_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei32_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16mf2x6_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei32_v_bf16m1x6_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei32_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_v_bf16m1x6_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg6ei64.c
new file mode 100644
index 0000000000000..463f026e4d897
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg6ei64.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei64_v_bf16mf4x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei64_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16mf4x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei64_v_bf16mf2x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei64_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16mf2x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei64_v_bf16m1x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei64_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16m1x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei64_v_bf16mf4x6_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei64_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16mf4x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei64_v_bf16mf2x6_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei64_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16mf2x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei64_v_bf16m1x6_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei64_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16m1x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei64_v_bf16mf4x6_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei64_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16mf4x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei64_v_bf16mf2x6_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei64_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16mf2x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei64_v_bf16m1x6_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei64_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16m1x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei64_v_bf16mf4x6_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei64_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16mf4x6_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei64_v_bf16mf2x6_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei64_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16mf2x6_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei64_v_bf16m1x6_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei64_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_v_bf16m1x6_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg6ei8.c
new file mode 100644
index 0000000000000..88a89bd3c1480
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg6ei8.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei8_v_bf16mf4x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei8_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16mf4x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei8_v_bf16mf2x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei8_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16mf2x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei8_v_bf16m1x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei8_v_bf16m1x6_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16m1x6_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei8_v_bf16mf4x6_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei8_v_bf16mf4x6_tum(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16mf4x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei8_v_bf16mf2x6_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei8_v_bf16mf2x6_tum(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16mf2x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei8_v_bf16m1x6_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei8_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16m1x6_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei8_v_bf16mf4x6_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei8_v_bf16mf4x6_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16mf4x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei8_v_bf16mf2x6_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei8_v_bf16mf2x6_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16mf2x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei8_v_bf16m1x6_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei8_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16m1x6_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei8_v_bf16mf4x6_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei8_v_bf16mf4x6_mu(vbool64_t vm, vbfloat16mf4x6_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16mf4x6_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei8_v_bf16mf2x6_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei8_v_bf16mf2x6_mu(vbool32_t vm, vbfloat16mf2x6_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16mf2x6_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei8_v_bf16m1x6_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei8_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_v_bf16m1x6_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg7ei32.c
new file mode 100644
index 0000000000000..f14c2bd126226
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg7ei32.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei32_v_bf16mf4x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei32_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16mf4x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei32_v_bf16mf2x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei32_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16mf2x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei32_v_bf16m1x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei32_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16m1x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei32_v_bf16mf4x7_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei32_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16mf4x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei32_v_bf16mf2x7_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei32_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16mf2x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei32_v_bf16m1x7_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei32_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16m1x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei32_v_bf16mf4x7_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei32_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16mf4x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei32_v_bf16mf2x7_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei32_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16mf2x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei32_v_bf16m1x7_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei32_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16m1x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei32_v_bf16mf4x7_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei32_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16mf4x7_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei32_v_bf16mf2x7_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei32_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16mf2x7_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei32_v_bf16m1x7_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei32_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_v_bf16m1x7_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg7ei64.c
new file mode 100644
index 0000000000000..c2cb327af0535
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg7ei64.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei64_v_bf16mf4x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei64_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16mf4x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei64_v_bf16mf2x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei64_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16mf2x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei64_v_bf16m1x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei64_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16m1x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei64_v_bf16mf4x7_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei64_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16mf4x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei64_v_bf16mf2x7_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei64_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16mf2x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei64_v_bf16m1x7_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei64_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16m1x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei64_v_bf16mf4x7_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei64_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16mf4x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei64_v_bf16mf2x7_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei64_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16mf2x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei64_v_bf16m1x7_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei64_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16m1x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei64_v_bf16mf4x7_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei64_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16mf4x7_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei64_v_bf16mf2x7_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei64_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16mf2x7_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei64_v_bf16m1x7_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei64_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_v_bf16m1x7_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg7ei8.c
new file mode 100644
index 0000000000000..93b0a5539ff59
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg7ei8.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei8_v_bf16mf4x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei8_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16mf4x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei8_v_bf16mf2x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei8_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16mf2x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei8_v_bf16m1x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei8_v_bf16m1x7_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16m1x7_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei8_v_bf16mf4x7_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei8_v_bf16mf4x7_tum(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16mf4x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei8_v_bf16mf2x7_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei8_v_bf16mf2x7_tum(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16mf2x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei8_v_bf16m1x7_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei8_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16m1x7_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei8_v_bf16mf4x7_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei8_v_bf16mf4x7_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16mf4x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei8_v_bf16mf2x7_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei8_v_bf16mf2x7_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16mf2x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei8_v_bf16m1x7_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei8_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16m1x7_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei8_v_bf16mf4x7_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei8_v_bf16mf4x7_mu(vbool64_t vm, vbfloat16mf4x7_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16mf4x7_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei8_v_bf16mf2x7_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei8_v_bf16mf2x7_mu(vbool32_t vm, vbfloat16mf2x7_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16mf2x7_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei8_v_bf16m1x7_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei8_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_v_bf16m1x7_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg8ei32.c
new file mode 100644
index 0000000000000..b0e1656a27139
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg8ei32.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_bf16mf4x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei32_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16mf4x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_bf16mf2x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei32_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16mf2x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_bf16m1x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei32_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16m1x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_bf16mf4x8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei32_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16mf4x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_bf16mf2x8_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei32_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16mf2x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_bf16m1x8_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei32_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16m1x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_bf16mf4x8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei32_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16mf4x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_bf16mf2x8_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei32_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16mf2x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_bf16m1x8_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei32_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16m1x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_bf16mf4x8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei32_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16mf4x8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_bf16mf2x8_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei32_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16mf2x8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_bf16m1x8_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei32_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_v_bf16m1x8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg8ei64.c
new file mode 100644
index 0000000000000..9820438b36135
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg8ei64.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei64_v_bf16mf4x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei64_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16mf4x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei64_v_bf16mf2x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei64_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16mf2x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei64_v_bf16m1x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei64_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16m1x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei64_v_bf16mf4x8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei64_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16mf4x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei64_v_bf16mf2x8_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei64_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16mf2x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei64_v_bf16m1x8_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei64_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16m1x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei64_v_bf16mf4x8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei64_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16mf4x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei64_v_bf16mf2x8_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei64_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16mf2x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei64_v_bf16m1x8_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei64_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16m1x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei64_v_bf16mf4x8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei64_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16mf4x8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei64_v_bf16mf2x8_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei64_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16mf2x8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei64_v_bf16m1x8_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei64_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_v_bf16m1x8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg8ei8.c
new file mode 100644
index 0000000000000..5290e29b6bb05
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/non-overloaded/vluxseg8ei8.c
@@ -0,0 +1,128 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei8_v_bf16mf4x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei8_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16mf4x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei8_v_bf16mf2x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei8_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16mf2x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei8_v_bf16m1x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei8_v_bf16m1x8_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16m1x8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei8_v_bf16mf4x8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei8_v_bf16mf4x8_tum(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16mf4x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei8_v_bf16mf2x8_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei8_v_bf16mf2x8_tum(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16mf2x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei8_v_bf16m1x8_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei8_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16m1x8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei8_v_bf16mf4x8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei8_v_bf16mf4x8_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16mf4x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei8_v_bf16mf2x8_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei8_v_bf16mf2x8_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16mf2x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei8_v_bf16m1x8_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei8_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16m1x8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei8_v_bf16mf4x8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei8_v_bf16mf4x8_mu(vbool64_t vm, vbfloat16mf4x8_t vd, const __bf16 *rs1, vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16mf4x8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei8_v_bf16mf2x8_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei8_v_bf16mf2x8_mu(vbool32_t vm, vbfloat16mf2x8_t vd, const __bf16 *rs1, vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16mf2x8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei8_v_bf16m1x8_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei8_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, const __bf16 *rs1, vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_v_bf16m1x8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxei32.c
new file mode 100644
index 0000000000000..82ea42b1b5b87
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxei32.c
@@ -0,0 +1,243 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei32_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei32_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei32_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei32_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei32_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei32_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei32_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei32_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei32_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei32_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei32_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei32_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei32_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei32_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei32_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei32_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei32_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei32_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei32_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei32_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ const __bf16 *rs1, vuint32m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei32_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei32_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei32_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei32_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei32_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei32_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei32_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei32_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei32_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei32_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ const __bf16 *rs1, vuint32m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei32_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei32_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei32_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei32_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei32_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei32_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei32_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei32_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei32_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei32_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ const __bf16 *rs1, vuint32m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxei64.c
new file mode 100644
index 0000000000000..ec6ee7d626b52
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxei64.c
@@ -0,0 +1,196 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei64_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei64_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei64_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei64_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei64_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei64_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei64_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei64_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei64_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei64_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei64_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei64_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei64_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei64_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei64_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei64_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei64_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei64_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei64_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei64_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei64_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei64_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei64_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei64_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei64_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei64_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei64_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei64_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei64_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei64_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei64_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei64_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxei8.c
new file mode 100644
index 0000000000000..93a6c28e4a79c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxei8.c
@@ -0,0 +1,290 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei8_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei8_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei8_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei8_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei8_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei8_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei8_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei8_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei8_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei8_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vloxei8_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vloxei.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vloxei8_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1,
+ vuint8m4_t rs2, size_t vl) {
+ return __riscv_vloxei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei8_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei8_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint8mf8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei8_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei8_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint8mf4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei8_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei8_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei8_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei8_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei8_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei8_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ const __bf16 *rs1, vuint8m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vloxei8_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vloxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vloxei8_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ const __bf16 *rs1, vuint8m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei8_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei8_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint8mf8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei8_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei8_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint8mf4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei8_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei8_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei8_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei8_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei8_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei8_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ const __bf16 *rs1, vuint8m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vloxei8_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vloxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vloxei8_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ const __bf16 *rs1, vuint8m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vloxei8_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vloxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vloxei8_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint8mf8_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vloxei8_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vloxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vloxei8_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint8mf4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vloxei8_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vloxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vloxei8_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vloxei8_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vloxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vloxei8_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vloxei8_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vloxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vloxei8_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ const __bf16 *rs1, vuint8m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vloxei8_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vloxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vloxei8_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ const __bf16 *rs1, vuint8m4_t rs2,
+ size_t vl) {
+ return __riscv_vloxei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg2ei32.c
new file mode 100644
index 0000000000000..2dc68cf30319c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg2ei32.c
@@ -0,0 +1,264 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei32_v_bf16mf4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei32_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei32_v_bf16mf2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei32_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei32_v_bf16m1x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei32_v_bf16m1x2_tu(vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei32_v_bf16m2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei32_v_bf16m2x2_tu(vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei32_v_bf16m4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei32_v_bf16m4x2_tu(vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei32_v_bf16mf4x2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei32_v_bf16mf4x2_tum(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei32_v_bf16mf2x2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei32_v_bf16mf2x2_tum(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei32_v_bf16m1x2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei32_v_bf16m1x2_tum(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei32_v_bf16m2x2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei32_v_bf16m2x2_tum(vbool8_t vm,
+ vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei32_v_bf16m4x2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei32_v_bf16m4x2_tum(vbool4_t vm,
+ vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei32_v_bf16mf4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei32_v_bf16mf4x2_tumu(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei32_v_bf16mf2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei32_v_bf16mf2x2_tumu(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei32_v_bf16m1x2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei32_v_bf16m1x2_tumu(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei32_v_bf16m2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei32_v_bf16m2x2_tumu(vbool8_t vm,
+ vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei32_v_bf16m4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei32_v_bf16m4x2_tumu(vbool4_t vm,
+ vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei32_v_bf16mf4x2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei32_v_bf16mf4x2_mu(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei32_v_bf16mf2x2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei32_v_bf16mf2x2_mu(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei32_v_bf16m1x2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei32_v_bf16m1x2_mu(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei32_v_bf16m2x2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei32_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei32_v_bf16m4x2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei32_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg2ei64.c
new file mode 100644
index 0000000000000..aebef33ace64a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg2ei64.c
@@ -0,0 +1,213 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei64_v_bf16mf4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei64_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei64_v_bf16mf2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei64_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei64_v_bf16m1x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei64_v_bf16m1x2_tu(vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei64_v_bf16m2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei64_v_bf16m2x2_tu(vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei64_v_bf16mf4x2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei64_v_bf16mf4x2_tum(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei64_v_bf16mf2x2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei64_v_bf16mf2x2_tum(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei64_v_bf16m1x2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei64_v_bf16m1x2_tum(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei64_v_bf16m2x2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei64_v_bf16m2x2_tum(vbool8_t vm,
+ vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei64_v_bf16mf4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei64_v_bf16mf4x2_tumu(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei64_v_bf16mf2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei64_v_bf16mf2x2_tumu(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei64_v_bf16m1x2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei64_v_bf16m1x2_tumu(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei64_v_bf16m2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei64_v_bf16m2x2_tumu(vbool8_t vm,
+ vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei64_v_bf16mf4x2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei64_v_bf16mf4x2_mu(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei64_v_bf16mf2x2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei64_v_bf16mf2x2_mu(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei64_v_bf16m1x2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei64_v_bf16m1x2_mu(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei64_v_bf16m2x2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei64_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg2ei8.c
new file mode 100644
index 0000000000000..fc11aef5bf5e0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg2ei8.c
@@ -0,0 +1,258 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei8_v_bf16mf4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei8_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei8_v_bf16mf2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei8_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei8_v_bf16m1x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei8_v_bf16m1x2_tu(vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei8_v_bf16m2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei8_v_bf16m2x2_tu(vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei8_v_bf16m4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei8_v_bf16m4x2_tu(vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei8_v_bf16mf4x2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei8_v_bf16mf4x2_tum(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei8_v_bf16mf2x2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei8_v_bf16mf2x2_tum(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei8_v_bf16m1x2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei8_v_bf16m1x2_tum(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei8_v_bf16m2x2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei8_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei8_v_bf16m4x2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei8_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei8_v_bf16mf4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei8_v_bf16mf4x2_tumu(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei8_v_bf16mf2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei8_v_bf16mf2x2_tumu(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei8_v_bf16m1x2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei8_v_bf16m1x2_tumu(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei8_v_bf16m2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei8_v_bf16m2x2_tumu(vbool8_t vm,
+ vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei8_v_bf16m4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei8_v_bf16m4x2_tumu(vbool4_t vm,
+ vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vloxseg2ei8_v_bf16mf4x2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vloxseg2ei8_v_bf16mf4x2_mu(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vloxseg2ei8_v_bf16mf2x2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vloxseg2ei8_v_bf16mf2x2_mu(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vloxseg2ei8_v_bf16m1x2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vloxseg2ei8_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vloxseg2ei8_v_bf16m2x2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vloxseg2ei8_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vloxseg2ei8_v_bf16m4x2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vloxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vloxseg2ei8_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vloxseg2ei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg3ei32.c
new file mode 100644
index 0000000000000..ac1ac4aba264e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg3ei32.c
@@ -0,0 +1,214 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei32_v_bf16mf4x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei32_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei32_v_bf16mf2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei32_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei32_v_bf16m1x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei32_v_bf16m1x3_tu(vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei32_v_bf16m2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei32_v_bf16m2x3_tu(vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei32_v_bf16mf4x3_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei32_v_bf16mf4x3_tum(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei32_v_bf16mf2x3_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei32_v_bf16mf2x3_tum(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei32_v_bf16m1x3_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei32_v_bf16m1x3_tum(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei32_v_bf16m2x3_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei32_v_bf16m2x3_tum(vbool8_t vm,
+ vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei32_v_bf16mf4x3_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei32_v_bf16mf4x3_tumu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei32_v_bf16mf2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei32_v_bf16mf2x3_tumu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei32_v_bf16m1x3_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei32_v_bf16m1x3_tumu(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei32_v_bf16m2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei32_v_bf16m2x3_tumu(vbool8_t vm,
+ vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei32_v_bf16mf4x3_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei32_v_bf16mf4x3_mu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei32_v_bf16mf2x3_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei32_v_bf16mf2x3_mu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei32_v_bf16m1x3_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei32_v_bf16m1x3_mu(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei32_v_bf16m2x3_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei32_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg3ei64.c
new file mode 100644
index 0000000000000..86519aafc1d35
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg3ei64.c
@@ -0,0 +1,213 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei64_v_bf16mf4x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei64_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei64_v_bf16mf2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei64_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei64_v_bf16m1x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei64_v_bf16m1x3_tu(vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei64_v_bf16m2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei64_v_bf16m2x3_tu(vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei64_v_bf16mf4x3_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei64_v_bf16mf4x3_tum(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei64_v_bf16mf2x3_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei64_v_bf16mf2x3_tum(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei64_v_bf16m1x3_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei64_v_bf16m1x3_tum(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei64_v_bf16m2x3_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei64_v_bf16m2x3_tum(vbool8_t vm,
+ vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei64_v_bf16mf4x3_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei64_v_bf16mf4x3_tumu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei64_v_bf16mf2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei64_v_bf16mf2x3_tumu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei64_v_bf16m1x3_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei64_v_bf16m1x3_tumu(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei64_v_bf16m2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei64_v_bf16m2x3_tumu(vbool8_t vm,
+ vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei64_v_bf16mf4x3_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei64_v_bf16mf4x3_mu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei64_v_bf16mf2x3_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei64_v_bf16mf2x3_mu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei64_v_bf16m1x3_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei64_v_bf16m1x3_mu(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei64_v_bf16m2x3_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei64_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg3ei8.c
new file mode 100644
index 0000000000000..92ea80dfd2f45
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg3ei8.c
@@ -0,0 +1,209 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei8_v_bf16mf4x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei8_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei8_v_bf16mf2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei8_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei8_v_bf16m1x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei8_v_bf16m1x3_tu(vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei8_v_bf16m2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei8_v_bf16m2x3_tu(vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei8_v_bf16mf4x3_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei8_v_bf16mf4x3_tum(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei8_v_bf16mf2x3_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei8_v_bf16mf2x3_tum(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei8_v_bf16m1x3_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei8_v_bf16m1x3_tum(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei8_v_bf16m2x3_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei8_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei8_v_bf16mf4x3_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei8_v_bf16mf4x3_tumu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei8_v_bf16mf2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei8_v_bf16mf2x3_tumu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei8_v_bf16m1x3_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei8_v_bf16m1x3_tumu(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei8_v_bf16m2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei8_v_bf16m2x3_tumu(vbool8_t vm,
+ vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vloxseg3ei8_v_bf16mf4x3_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vloxseg3ei8_v_bf16mf4x3_mu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vloxseg3ei8_v_bf16mf2x3_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vloxseg3ei8_v_bf16mf2x3_mu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vloxseg3ei8_v_bf16m1x3_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vloxseg3ei8_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vloxseg3ei8_v_bf16m2x3_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vloxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vloxseg3ei8_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg3ei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg4ei32.c
new file mode 100644
index 0000000000000..7cb17187f7013
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg4ei32.c
@@ -0,0 +1,214 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei32_v_bf16mf4x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei32_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei32_v_bf16mf2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei32_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei32_v_bf16m1x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei32_v_bf16m1x4_tu(vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei32_v_bf16m2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei32_v_bf16m2x4_tu(vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei32_v_bf16mf4x4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei32_v_bf16mf4x4_tum(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei32_v_bf16mf2x4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei32_v_bf16mf2x4_tum(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei32_v_bf16m1x4_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei32_v_bf16m1x4_tum(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei32_v_bf16m2x4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei32_v_bf16m2x4_tum(vbool8_t vm,
+ vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei32_v_bf16mf4x4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei32_v_bf16mf4x4_tumu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei32_v_bf16mf2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei32_v_bf16mf2x4_tumu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei32_v_bf16m1x4_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei32_v_bf16m1x4_tumu(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei32_v_bf16m2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei32_v_bf16m2x4_tumu(vbool8_t vm,
+ vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei32_v_bf16mf4x4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei32_v_bf16mf4x4_mu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei32_v_bf16mf2x4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei32_v_bf16mf2x4_mu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei32_v_bf16m1x4_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei32_v_bf16m1x4_mu(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei32_v_bf16m2x4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei32_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg4ei64.c
new file mode 100644
index 0000000000000..054d04ff16da1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg4ei64.c
@@ -0,0 +1,213 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei64_v_bf16mf4x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei64_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei64_v_bf16mf2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei64_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei64_v_bf16m1x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei64_v_bf16m1x4_tu(vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei64_v_bf16m2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei64_v_bf16m2x4_tu(vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei64_v_bf16mf4x4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei64_v_bf16mf4x4_tum(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei64_v_bf16mf2x4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei64_v_bf16mf2x4_tum(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei64_v_bf16m1x4_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei64_v_bf16m1x4_tum(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei64_v_bf16m2x4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei64_v_bf16m2x4_tum(vbool8_t vm,
+ vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei64_v_bf16mf4x4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei64_v_bf16mf4x4_tumu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei64_v_bf16mf2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei64_v_bf16mf2x4_tumu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei64_v_bf16m1x4_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei64_v_bf16m1x4_tumu(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei64_v_bf16m2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei64_v_bf16m2x4_tumu(vbool8_t vm,
+ vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei64_v_bf16mf4x4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei64_v_bf16mf4x4_mu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei64_v_bf16mf2x4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei64_v_bf16mf2x4_mu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei64_v_bf16m1x4_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei64_v_bf16m1x4_mu(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei64_v_bf16m2x4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei64_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg4ei8.c
new file mode 100644
index 0000000000000..f1571233cdd9e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg4ei8.c
@@ -0,0 +1,209 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei8_v_bf16mf4x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei8_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei8_v_bf16mf2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei8_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei8_v_bf16m1x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei8_v_bf16m1x4_tu(vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei8_v_bf16m2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei8_v_bf16m2x4_tu(vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei8_v_bf16mf4x4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei8_v_bf16mf4x4_tum(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei8_v_bf16mf2x4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei8_v_bf16mf2x4_tum(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei8_v_bf16m1x4_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei8_v_bf16m1x4_tum(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei8_v_bf16m2x4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei8_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei8_v_bf16mf4x4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei8_v_bf16mf4x4_tumu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei8_v_bf16mf2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei8_v_bf16mf2x4_tumu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei8_v_bf16m1x4_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei8_v_bf16m1x4_tumu(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei8_v_bf16m2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei8_v_bf16m2x4_tumu(vbool8_t vm,
+ vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vloxseg4ei8_v_bf16mf4x4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vloxseg4ei8_v_bf16mf4x4_mu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vloxseg4ei8_v_bf16mf2x4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vloxseg4ei8_v_bf16mf2x4_mu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vloxseg4ei8_v_bf16m1x4_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vloxseg4ei8_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vloxseg4ei8_v_bf16m2x4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vloxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vloxseg4ei8_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vloxseg4ei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg5ei32.c
new file mode 100644
index 0000000000000..3f57c002fe4c2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg5ei32.c
@@ -0,0 +1,164 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei32_v_bf16mf4x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei32_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei32_v_bf16mf2x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei32_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei32_v_bf16m1x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei32_v_bf16m1x5_tu(vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei32_v_bf16mf4x5_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei32_v_bf16mf4x5_tum(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei32_v_bf16mf2x5_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei32_v_bf16mf2x5_tum(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei32_v_bf16m1x5_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei32_v_bf16m1x5_tum(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei32_v_bf16mf4x5_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei32_v_bf16mf4x5_tumu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei32_v_bf16mf2x5_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei32_v_bf16mf2x5_tumu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei32_v_bf16m1x5_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei32_v_bf16m1x5_tumu(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei32_v_bf16mf4x5_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei32_v_bf16mf4x5_mu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei32_v_bf16mf2x5_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei32_v_bf16mf2x5_mu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei32_v_bf16m1x5_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei32_v_bf16m1x5_mu(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg5ei64.c
new file mode 100644
index 0000000000000..fb9850cb0bff9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg5ei64.c
@@ -0,0 +1,163 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei64_v_bf16mf4x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei64_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei64_v_bf16mf2x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei64_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei64_v_bf16m1x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei64_v_bf16m1x5_tu(vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei64_v_bf16mf4x5_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei64_v_bf16mf4x5_tum(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei64_v_bf16mf2x5_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei64_v_bf16mf2x5_tum(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei64_v_bf16m1x5_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei64_v_bf16m1x5_tum(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei64_v_bf16mf4x5_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei64_v_bf16mf4x5_tumu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei64_v_bf16mf2x5_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei64_v_bf16mf2x5_tumu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei64_v_bf16m1x5_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei64_v_bf16m1x5_tumu(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei64_v_bf16mf4x5_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei64_v_bf16mf4x5_mu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei64_v_bf16mf2x5_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei64_v_bf16mf2x5_mu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei64_v_bf16m1x5_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei64_v_bf16m1x5_mu(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg5ei8.c
new file mode 100644
index 0000000000000..06f3e8ffbf712
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg5ei8.c
@@ -0,0 +1,160 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei8_v_bf16mf4x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei8_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei8_v_bf16mf2x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei8_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei8_v_bf16m1x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei8_v_bf16m1x5_tu(vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei8_v_bf16mf4x5_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei8_v_bf16mf4x5_tum(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei8_v_bf16mf2x5_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei8_v_bf16mf2x5_tum(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei8_v_bf16m1x5_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei8_v_bf16m1x5_tum(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei8_v_bf16mf4x5_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei8_v_bf16mf4x5_tumu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei8_v_bf16mf2x5_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei8_v_bf16mf2x5_tumu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei8_v_bf16m1x5_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei8_v_bf16m1x5_tumu(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vloxseg5ei8_v_bf16mf4x5_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vloxseg5ei8_v_bf16mf4x5_mu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vloxseg5ei8_v_bf16mf2x5_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vloxseg5ei8_v_bf16mf2x5_mu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vloxseg5ei8_v_bf16m1x5_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vloxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vloxseg5ei8_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg5ei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg6ei32.c
new file mode 100644
index 0000000000000..94e44d09e8313
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg6ei32.c
@@ -0,0 +1,164 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei32_v_bf16mf4x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei32_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei32_v_bf16mf2x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei32_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei32_v_bf16m1x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei32_v_bf16m1x6_tu(vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei32_v_bf16mf4x6_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei32_v_bf16mf4x6_tum(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei32_v_bf16mf2x6_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei32_v_bf16mf2x6_tum(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei32_v_bf16m1x6_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei32_v_bf16m1x6_tum(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei32_v_bf16mf4x6_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei32_v_bf16mf4x6_tumu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei32_v_bf16mf2x6_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei32_v_bf16mf2x6_tumu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei32_v_bf16m1x6_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei32_v_bf16m1x6_tumu(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei32_v_bf16mf4x6_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei32_v_bf16mf4x6_mu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei32_v_bf16mf2x6_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei32_v_bf16mf2x6_mu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei32_v_bf16m1x6_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei32_v_bf16m1x6_mu(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg6ei64.c
new file mode 100644
index 0000000000000..2981b18c491d4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg6ei64.c
@@ -0,0 +1,163 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei64_v_bf16mf4x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei64_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei64_v_bf16mf2x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei64_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei64_v_bf16m1x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei64_v_bf16m1x6_tu(vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei64_v_bf16mf4x6_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei64_v_bf16mf4x6_tum(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei64_v_bf16mf2x6_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei64_v_bf16mf2x6_tum(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei64_v_bf16m1x6_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei64_v_bf16m1x6_tum(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei64_v_bf16mf4x6_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei64_v_bf16mf4x6_tumu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei64_v_bf16mf2x6_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei64_v_bf16mf2x6_tumu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei64_v_bf16m1x6_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei64_v_bf16m1x6_tumu(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei64_v_bf16mf4x6_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei64_v_bf16mf4x6_mu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei64_v_bf16mf2x6_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei64_v_bf16mf2x6_mu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei64_v_bf16m1x6_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei64_v_bf16m1x6_mu(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg6ei8.c
new file mode 100644
index 0000000000000..23fa390aef0dd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg6ei8.c
@@ -0,0 +1,160 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei8_v_bf16mf4x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei8_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei8_v_bf16mf2x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei8_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei8_v_bf16m1x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei8_v_bf16m1x6_tu(vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei8_v_bf16mf4x6_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei8_v_bf16mf4x6_tum(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei8_v_bf16mf2x6_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei8_v_bf16mf2x6_tum(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei8_v_bf16m1x6_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei8_v_bf16m1x6_tum(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei8_v_bf16mf4x6_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei8_v_bf16mf4x6_tumu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei8_v_bf16mf2x6_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei8_v_bf16mf2x6_tumu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei8_v_bf16m1x6_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei8_v_bf16m1x6_tumu(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vloxseg6ei8_v_bf16mf4x6_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vloxseg6ei8_v_bf16mf4x6_mu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vloxseg6ei8_v_bf16mf2x6_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vloxseg6ei8_v_bf16mf2x6_mu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vloxseg6ei8_v_bf16m1x6_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vloxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vloxseg6ei8_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg6ei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg7ei32.c
new file mode 100644
index 0000000000000..f3293d0398585
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg7ei32.c
@@ -0,0 +1,164 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei32_v_bf16mf4x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei32_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei32_v_bf16mf2x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei32_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei32_v_bf16m1x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei32_v_bf16m1x7_tu(vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei32_v_bf16mf4x7_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei32_v_bf16mf4x7_tum(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei32_v_bf16mf2x7_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei32_v_bf16mf2x7_tum(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei32_v_bf16m1x7_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei32_v_bf16m1x7_tum(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei32_v_bf16mf4x7_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei32_v_bf16mf4x7_tumu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei32_v_bf16mf2x7_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei32_v_bf16mf2x7_tumu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei32_v_bf16m1x7_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei32_v_bf16m1x7_tumu(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei32_v_bf16mf4x7_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei32_v_bf16mf4x7_mu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei32_v_bf16mf2x7_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei32_v_bf16mf2x7_mu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei32_v_bf16m1x7_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei32_v_bf16m1x7_mu(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg7ei64.c
new file mode 100644
index 0000000000000..10209cc2192ed
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg7ei64.c
@@ -0,0 +1,163 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei64_v_bf16mf4x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei64_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei64_v_bf16mf2x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei64_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei64_v_bf16m1x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei64_v_bf16m1x7_tu(vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei64_v_bf16mf4x7_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei64_v_bf16mf4x7_tum(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei64_v_bf16mf2x7_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei64_v_bf16mf2x7_tum(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei64_v_bf16m1x7_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei64_v_bf16m1x7_tum(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei64_v_bf16mf4x7_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei64_v_bf16mf4x7_tumu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei64_v_bf16mf2x7_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei64_v_bf16mf2x7_tumu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei64_v_bf16m1x7_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei64_v_bf16m1x7_tumu(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei64_v_bf16mf4x7_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei64_v_bf16mf4x7_mu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei64_v_bf16mf2x7_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei64_v_bf16mf2x7_mu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei64_v_bf16m1x7_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei64_v_bf16m1x7_mu(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg7ei8.c
new file mode 100644
index 0000000000000..8f03ff1603085
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg7ei8.c
@@ -0,0 +1,160 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei8_v_bf16mf4x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei8_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei8_v_bf16mf2x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei8_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei8_v_bf16m1x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei8_v_bf16m1x7_tu(vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei8_v_bf16mf4x7_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei8_v_bf16mf4x7_tum(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei8_v_bf16mf2x7_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei8_v_bf16mf2x7_tum(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei8_v_bf16m1x7_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei8_v_bf16m1x7_tum(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei8_v_bf16mf4x7_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei8_v_bf16mf4x7_tumu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei8_v_bf16mf2x7_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei8_v_bf16mf2x7_tumu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei8_v_bf16m1x7_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei8_v_bf16m1x7_tumu(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vloxseg7ei8_v_bf16mf4x7_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vloxseg7ei8_v_bf16mf4x7_mu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vloxseg7ei8_v_bf16mf2x7_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vloxseg7ei8_v_bf16mf2x7_mu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vloxseg7ei8_v_bf16m1x7_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vloxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vloxseg7ei8_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg7ei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg8ei32.c
new file mode 100644
index 0000000000000..a8c0d4909af09
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg8ei32.c
@@ -0,0 +1,164 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei32_v_bf16mf4x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei32_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei32_v_bf16mf2x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei32_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei32_v_bf16m1x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei32_v_bf16m1x8_tu(vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei32_v_bf16mf4x8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei32_v_bf16mf4x8_tum(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei32_v_bf16mf2x8_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei32_v_bf16mf2x8_tum(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei32_v_bf16m1x8_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei32_v_bf16m1x8_tum(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei32_v_bf16mf4x8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei32_v_bf16mf4x8_tumu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei32_v_bf16mf2x8_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei32_v_bf16mf2x8_tumu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei32_v_bf16m1x8_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei32_v_bf16m1x8_tumu(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei32_v_bf16mf4x8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei32_v_bf16mf4x8_mu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei32_v_bf16mf2x8_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei32_v_bf16mf2x8_mu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei32_v_bf16m1x8_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei32_v_bf16m1x8_mu(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg8ei64.c
new file mode 100644
index 0000000000000..08c8fdc56c9cb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg8ei64.c
@@ -0,0 +1,163 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei64_v_bf16mf4x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei64_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei64_v_bf16mf2x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei64_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei64_v_bf16m1x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei64_v_bf16m1x8_tu(vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei64_v_bf16mf4x8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei64_v_bf16mf4x8_tum(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei64_v_bf16mf2x8_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei64_v_bf16mf2x8_tum(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei64_v_bf16m1x8_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei64_v_bf16m1x8_tum(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei64_v_bf16mf4x8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei64_v_bf16mf4x8_tumu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei64_v_bf16mf2x8_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei64_v_bf16mf2x8_tumu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei64_v_bf16m1x8_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei64_v_bf16m1x8_tumu(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei64_v_bf16mf4x8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei64_v_bf16mf4x8_mu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei64_v_bf16mf2x8_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei64_v_bf16mf2x8_mu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei64_v_bf16m1x8_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei64_v_bf16m1x8_mu(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg8ei8.c
new file mode 100644
index 0000000000000..247dcfb578294
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vloxseg8ei8.c
@@ -0,0 +1,160 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei8_v_bf16mf4x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei8_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei8_v_bf16mf2x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei8_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei8_v_bf16m1x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei8_v_bf16m1x8_tu(vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei8_v_bf16mf4x8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei8_v_bf16mf4x8_tum(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei8_v_bf16mf2x8_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei8_v_bf16mf2x8_tum(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei8_v_bf16m1x8_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei8_v_bf16m1x8_tum(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei8_v_bf16mf4x8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei8_v_bf16mf4x8_tumu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei8_v_bf16mf2x8_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei8_v_bf16mf2x8_tumu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei8_v_bf16m1x8_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei8_v_bf16m1x8_tumu(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vloxseg8ei8_v_bf16mf4x8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vloxseg8ei8_v_bf16mf4x8_mu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vloxseg8ei8_v_bf16mf2x8_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vloxseg8ei8_v_bf16mf2x8_mu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vloxseg8ei8_v_bf16m1x8_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vloxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vloxseg8ei8_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vloxseg8ei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxei32.c
new file mode 100644
index 0000000000000..eef643f85e038
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxei32.c
@@ -0,0 +1,243 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei32_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei32_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei32_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei32_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei32_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei32_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei32_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei32_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei32_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei32_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei32_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei32_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei32_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei32_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei32_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei32_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei32_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei32_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei32_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei32_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ const __bf16 *rs1, vuint32m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei32_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei32_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei32_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei32_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei32_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei32_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei32_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei32_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei32_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei32_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ const __bf16 *rs1, vuint32m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei32_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i32.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei32_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei32_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i32.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei32_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei32_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i32.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei32_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint32m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei32_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i32.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei32_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint32m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei32_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i32.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei32_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ const __bf16 *rs1, vuint32m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxei64.c
new file mode 100644
index 0000000000000..08eac74a0163b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxei64.c
@@ -0,0 +1,196 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei64_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei64_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei64_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei64_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei64_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei64_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei64_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei64_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei64_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei64_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei64_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei64_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei64_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei64_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei64_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei64_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei64_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei64_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei64_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei64_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei64_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei64_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei64_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei64_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei64_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i64.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei64_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei64_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i64.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei64_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei64_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i64.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei64_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint64m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei64_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i64.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei64_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint64m8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxei8.c
new file mode 100644
index 0000000000000..88c00a6c86423
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxei8.c
@@ -0,0 +1,290 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei8_v_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei8_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei8_v_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei8_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei8_v_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei8_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei8_v_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei8_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei8_v_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei8_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vluxei8_v_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vluxei.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vluxei8_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1,
+ vuint8m4_t rs2, size_t vl) {
+ return __riscv_vluxei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei8_v_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei8_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint8mf8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei8_v_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei8_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint8mf4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei8_v_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei8_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei8_v_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei8_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei8_v_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei8_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+ const __bf16 *rs1, vuint8m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vluxei8_v_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vluxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vluxei8_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+ const __bf16 *rs1, vuint8m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei8_v_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei8_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint8mf8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei8_v_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei8_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint8mf4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei8_v_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei8_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei8_v_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei8_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei8_v_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei8_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+ const __bf16 *rs1, vuint8m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vluxei8_v_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vluxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vluxei8_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+ const __bf16 *rs1, vuint8m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vluxei8_v_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vluxei.mask.nxv1bf16.p0.nxv1i8.i64(<vscale x 1 x bfloat> [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vluxei8_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ const __bf16 *rs1, vuint8mf8_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vluxei8_v_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vluxei.mask.nxv2bf16.p0.nxv2i8.i64(<vscale x 2 x bfloat> [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vluxei8_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ const __bf16 *rs1, vuint8mf4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vluxei8_v_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vluxei.mask.nxv4bf16.p0.nxv4i8.i64(<vscale x 4 x bfloat> [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vluxei8_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ const __bf16 *rs1, vuint8mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vluxei8_v_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vluxei.mask.nxv8bf16.p0.nxv8i8.i64(<vscale x 8 x bfloat> [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vluxei8_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ const __bf16 *rs1, vuint8m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vluxei8_v_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vluxei.mask.nxv16bf16.p0.nxv16i8.i64(<vscale x 16 x bfloat> [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vluxei8_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ const __bf16 *rs1, vuint8m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vluxei8_v_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 32 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vluxei.mask.nxv32bf16.p0.nxv32i8.i64(<vscale x 32 x bfloat> [[VD]], ptr [[RS1]], <vscale x 32 x i8> [[RS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vluxei8_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+ const __bf16 *rs1, vuint8m4_t rs2,
+ size_t vl) {
+ return __riscv_vluxei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg2ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg2ei32.c
new file mode 100644
index 0000000000000..a6d5aa949b4d8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg2ei32.c
@@ -0,0 +1,264 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei32_v_bf16mf4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei32_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei32_v_bf16mf2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei32_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei32_v_bf16m1x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei32_v_bf16m1x2_tu(vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei32_v_bf16m2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei32_v_bf16m2x2_tu(vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei32_v_bf16m4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei32_v_bf16m4x2_tu(vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei32_v_bf16mf4x2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei32_v_bf16mf4x2_tum(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei32_v_bf16mf2x2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei32_v_bf16mf2x2_tum(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei32_v_bf16m1x2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei32_v_bf16m1x2_tum(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei32_v_bf16m2x2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei32_v_bf16m2x2_tum(vbool8_t vm,
+ vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei32_v_bf16m4x2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei32_v_bf16m4x2_tum(vbool4_t vm,
+ vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei32_v_bf16mf4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei32_v_bf16mf4x2_tumu(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei32_v_bf16mf2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei32_v_bf16mf2x2_tumu(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei32_v_bf16m1x2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei32_v_bf16m1x2_tumu(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei32_v_bf16m2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei32_v_bf16m2x2_tumu(vbool8_t vm,
+ vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei32_v_bf16m4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei32_v_bf16m4x2_tumu(vbool4_t vm,
+ vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei32_v_bf16mf4x2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei32_v_bf16mf4x2_mu(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei32_v_bf16mf2x2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei32_v_bf16mf2x2_mu(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei32_v_bf16m1x2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei32_v_bf16m1x2_mu(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei32_v_bf16m2x2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei32_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei32_v_bf16m4x2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i32.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i32> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei32_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint32m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg2ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg2ei64.c
new file mode 100644
index 0000000000000..f19c96521a7a6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg2ei64.c
@@ -0,0 +1,213 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei64_v_bf16mf4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei64_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei64_v_bf16mf2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei64_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei64_v_bf16m1x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei64_v_bf16m1x2_tu(vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei64_v_bf16m2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei64_v_bf16m2x2_tu(vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei64_v_bf16mf4x2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei64_v_bf16mf4x2_tum(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei64_v_bf16mf2x2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei64_v_bf16mf2x2_tum(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei64_v_bf16m1x2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei64_v_bf16m1x2_tum(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei64_v_bf16m2x2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei64_v_bf16m2x2_tum(vbool8_t vm,
+ vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei64_v_bf16mf4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei64_v_bf16mf4x2_tumu(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei64_v_bf16mf2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei64_v_bf16mf2x2_tumu(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei64_v_bf16m1x2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei64_v_bf16m1x2_tumu(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei64_v_bf16m2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei64_v_bf16m2x2_tumu(vbool8_t vm,
+ vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei64_v_bf16mf4x2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei64_v_bf16mf4x2_mu(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei64_v_bf16mf2x2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei64_v_bf16mf2x2_mu(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei64_v_bf16m1x2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei64_v_bf16m1x2_mu(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei64_v_bf16m2x2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei64_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg2ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg2ei8.c
new file mode 100644
index 0000000000000..07e820458e709
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg2ei8.c
@@ -0,0 +1,258 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei8_v_bf16mf4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei8_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei8_v_bf16mf2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei8_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei8_v_bf16m1x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei8_v_bf16m1x2_tu(vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei8_v_bf16m2x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei8_v_bf16m2x2_tu(vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei8_v_bf16m4x2_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei8_v_bf16m4x2_tu(vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei8_v_bf16mf4x2_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei8_v_bf16mf4x2_tum(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei8_v_bf16mf2x2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei8_v_bf16mf2x2_tum(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei8_v_bf16m1x2_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei8_v_bf16m1x2_tum(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei8_v_bf16m2x2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei8_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei8_v_bf16m4x2_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei8_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei8_v_bf16mf4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei8_v_bf16mf4x2_tumu(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei8_v_bf16mf2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei8_v_bf16mf2x2_tumu(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei8_v_bf16m1x2_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei8_v_bf16m1x2_tumu(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei8_v_bf16m2x2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei8_v_bf16m2x2_tumu(vbool8_t vm,
+ vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei8_v_bf16m4x2_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei8_v_bf16m4x2_tumu(vbool4_t vm,
+ vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @test_vluxseg2ei8_v_bf16mf4x2_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv2i8_2t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf4x2_t test_vluxseg2ei8_v_bf16mf4x2_mu(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @test_vluxseg2ei8_v_bf16mf2x2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv4i8_2t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 2) [[TMP0]]
+//
+vbfloat16mf2x2_t test_vluxseg2ei8_v_bf16mf2x2_mu(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vluxseg2ei8_v_bf16m1x2_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv8i8_2t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) [[TMP0]]
+//
+vbfloat16m1x2_t test_vluxseg2ei8_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @test_vluxseg2ei8_v_bf16m2x2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv16i8_2t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) [[TMP0]]
+//
+vbfloat16m2x2_t test_vluxseg2ei8_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @test_vluxseg2ei8_v_bf16m4x2_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 16 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv32i8_2t.p0.nxv16i8.nxv16i1.i64(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[VD]], ptr [[RS1]], <vscale x 16 x i8> [[RS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 32 x i8>, 2) [[TMP0]]
+//
+vbfloat16m4x2_t test_vluxseg2ei8_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ vuint8m2_t rs2, size_t vl) {
+ return __riscv_vluxseg2ei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg3ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg3ei32.c
new file mode 100644
index 0000000000000..05bece790e068
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg3ei32.c
@@ -0,0 +1,214 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei32_v_bf16mf4x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei32_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei32_v_bf16mf2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei32_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei32_v_bf16m1x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei32_v_bf16m1x3_tu(vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei32_v_bf16m2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei32_v_bf16m2x3_tu(vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei32_v_bf16mf4x3_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei32_v_bf16mf4x3_tum(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei32_v_bf16mf2x3_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei32_v_bf16mf2x3_tum(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei32_v_bf16m1x3_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei32_v_bf16m1x3_tum(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei32_v_bf16m2x3_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei32_v_bf16m2x3_tum(vbool8_t vm,
+ vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei32_v_bf16mf4x3_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei32_v_bf16mf4x3_tumu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei32_v_bf16mf2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei32_v_bf16mf2x3_tumu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei32_v_bf16m1x3_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei32_v_bf16m1x3_tumu(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei32_v_bf16m2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei32_v_bf16m2x3_tumu(vbool8_t vm,
+ vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei32_v_bf16mf4x3_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei32_v_bf16mf4x3_mu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei32_v_bf16mf2x3_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei32_v_bf16mf2x3_mu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei32_v_bf16m1x3_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei32_v_bf16m1x3_mu(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei32_v_bf16m2x3_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei32_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg3ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg3ei64.c
new file mode 100644
index 0000000000000..16898e7e27471
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg3ei64.c
@@ -0,0 +1,213 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei64_v_bf16mf4x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei64_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei64_v_bf16mf2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei64_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei64_v_bf16m1x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei64_v_bf16m1x3_tu(vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei64_v_bf16m2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei64_v_bf16m2x3_tu(vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei64_v_bf16mf4x3_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei64_v_bf16mf4x3_tum(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei64_v_bf16mf2x3_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei64_v_bf16mf2x3_tum(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei64_v_bf16m1x3_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei64_v_bf16m1x3_tum(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei64_v_bf16m2x3_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei64_v_bf16m2x3_tum(vbool8_t vm,
+ vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei64_v_bf16mf4x3_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei64_v_bf16mf4x3_tumu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei64_v_bf16mf2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei64_v_bf16mf2x3_tumu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei64_v_bf16m1x3_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei64_v_bf16m1x3_tumu(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei64_v_bf16m2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei64_v_bf16m2x3_tumu(vbool8_t vm,
+ vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei64_v_bf16mf4x3_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei64_v_bf16mf4x3_mu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei64_v_bf16mf2x3_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei64_v_bf16mf2x3_mu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei64_v_bf16m1x3_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei64_v_bf16m1x3_mu(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei64_v_bf16m2x3_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei64_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg3ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg3ei8.c
new file mode 100644
index 0000000000000..b48554b75eadc
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg3ei8.c
@@ -0,0 +1,209 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei8_v_bf16mf4x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei8_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei8_v_bf16mf2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei8_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei8_v_bf16m1x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei8_v_bf16m1x3_tu(vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei8_v_bf16m2x3_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei8_v_bf16m2x3_tu(vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei8_v_bf16mf4x3_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei8_v_bf16mf4x3_tum(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei8_v_bf16mf2x3_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei8_v_bf16mf2x3_tum(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei8_v_bf16m1x3_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei8_v_bf16m1x3_tum(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei8_v_bf16m2x3_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei8_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei8_v_bf16mf4x3_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei8_v_bf16mf4x3_tumu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei8_v_bf16mf2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei8_v_bf16mf2x3_tumu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei8_v_bf16m1x3_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei8_v_bf16m1x3_tumu(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei8_v_bf16m2x3_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei8_v_bf16m2x3_tumu(vbool8_t vm,
+ vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @test_vluxseg3ei8_v_bf16mf4x3_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv2i8_3t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf4x3_t test_vluxseg3ei8_v_bf16mf4x3_mu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @test_vluxseg3ei8_v_bf16mf2x3_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv4i8_3t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 3) [[TMP0]]
+//
+vbfloat16mf2x3_t test_vluxseg3ei8_v_bf16mf2x3_mu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @test_vluxseg3ei8_v_bf16m1x3_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv8i8_3t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 3) [[TMP0]]
+//
+vbfloat16m1x3_t test_vluxseg3ei8_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @test_vluxseg3ei8_v_bf16m2x3_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv16i8_3t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 3) [[TMP0]]
+//
+vbfloat16m2x3_t test_vluxseg3ei8_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg3ei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg4ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg4ei32.c
new file mode 100644
index 0000000000000..dfd314f8f23ed
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg4ei32.c
@@ -0,0 +1,214 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei32_v_bf16mf4x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei32_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei32_v_bf16mf2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei32_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei32_v_bf16m1x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei32_v_bf16m1x4_tu(vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei32_v_bf16m2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei32_v_bf16m2x4_tu(vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei32_v_bf16mf4x4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei32_v_bf16mf4x4_tum(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei32_v_bf16mf2x4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei32_v_bf16mf2x4_tum(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei32_v_bf16m1x4_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei32_v_bf16m1x4_tum(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei32_v_bf16m2x4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei32_v_bf16m2x4_tum(vbool8_t vm,
+ vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei32_v_bf16mf4x4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei32_v_bf16mf4x4_tumu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei32_v_bf16mf2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei32_v_bf16mf2x4_tumu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei32_v_bf16m1x4_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei32_v_bf16m1x4_tumu(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei32_v_bf16m2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei32_v_bf16m2x4_tumu(vbool8_t vm,
+ vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei32_v_bf16mf4x4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei32_v_bf16mf4x4_mu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei32_v_bf16mf2x4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei32_v_bf16mf2x4_mu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei32_v_bf16m1x4_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei32_v_bf16m1x4_mu(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei32_v_bf16m2x4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i32.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i32> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei32_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint32m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg4ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg4ei64.c
new file mode 100644
index 0000000000000..a8f779af29cd8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg4ei64.c
@@ -0,0 +1,213 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei64_v_bf16mf4x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei64_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei64_v_bf16mf2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei64_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei64_v_bf16m1x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei64_v_bf16m1x4_tu(vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei64_v_bf16m2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei64_v_bf16m2x4_tu(vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei64_v_bf16mf4x4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei64_v_bf16mf4x4_tum(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei64_v_bf16mf2x4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei64_v_bf16mf2x4_tum(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei64_v_bf16m1x4_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei64_v_bf16m1x4_tum(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei64_v_bf16m2x4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei64_v_bf16m2x4_tum(vbool8_t vm,
+ vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei64_v_bf16mf4x4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei64_v_bf16mf4x4_tumu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei64_v_bf16mf2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei64_v_bf16mf2x4_tumu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei64_v_bf16m1x4_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei64_v_bf16m1x4_tumu(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei64_v_bf16m2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei64_v_bf16m2x4_tumu(vbool8_t vm,
+ vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei64_v_bf16mf4x4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei64_v_bf16mf4x4_mu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei64_v_bf16mf2x4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei64_v_bf16mf2x4_mu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei64_v_bf16m1x4_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei64_v_bf16m1x4_mu(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei64_v_bf16m2x4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i64.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i64> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei64_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint64m8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg4ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg4ei8.c
new file mode 100644
index 0000000000000..b3fc409391c6a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg4ei8.c
@@ -0,0 +1,209 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei8_v_bf16mf4x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei8_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei8_v_bf16mf2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei8_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei8_v_bf16m1x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei8_v_bf16m1x4_tu(vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei8_v_bf16m2x4_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei8_v_bf16m2x4_tu(vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei8_v_bf16mf4x4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei8_v_bf16mf4x4_tum(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei8_v_bf16mf2x4_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei8_v_bf16mf2x4_tum(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei8_v_bf16m1x4_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei8_v_bf16m1x4_tum(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei8_v_bf16m2x4_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei8_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei8_v_bf16mf4x4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei8_v_bf16mf4x4_tumu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei8_v_bf16mf2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei8_v_bf16mf2x4_tumu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei8_v_bf16m1x4_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei8_v_bf16m1x4_tumu(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei8_v_bf16m2x4_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei8_v_bf16m2x4_tumu(vbool8_t vm,
+ vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @test_vluxseg4ei8_v_bf16mf4x4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv2i8_4t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf4x4_t test_vluxseg4ei8_v_bf16mf4x4_mu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @test_vluxseg4ei8_v_bf16mf2x4_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv4i8_4t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 4) [[TMP0]]
+//
+vbfloat16mf2x4_t test_vluxseg4ei8_v_bf16mf2x4_mu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @test_vluxseg4ei8_v_bf16m1x4_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv8i8_4t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 4) [[TMP0]]
+//
+vbfloat16m1x4_t test_vluxseg4ei8_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @test_vluxseg4ei8_v_bf16m2x4_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 8 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 16 x i8>, 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv8i8.nxv8i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[VD]], ptr [[RS1]], <vscale x 8 x i8> [[RS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 16 x i8>, 4) [[TMP0]]
+//
+vbfloat16m2x4_t test_vluxseg4ei8_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ vuint8m1_t rs2, size_t vl) {
+ return __riscv_vluxseg4ei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg5ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg5ei32.c
new file mode 100644
index 0000000000000..f40d2b8e162e9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg5ei32.c
@@ -0,0 +1,164 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei32_v_bf16mf4x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei32_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei32_v_bf16mf2x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei32_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei32_v_bf16m1x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei32_v_bf16m1x5_tu(vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei32_v_bf16mf4x5_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei32_v_bf16mf4x5_tum(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei32_v_bf16mf2x5_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei32_v_bf16mf2x5_tum(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei32_v_bf16m1x5_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei32_v_bf16m1x5_tum(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei32_v_bf16mf4x5_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei32_v_bf16mf4x5_tumu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei32_v_bf16mf2x5_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei32_v_bf16mf2x5_tumu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei32_v_bf16m1x5_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei32_v_bf16m1x5_tumu(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei32_v_bf16mf4x5_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei32_v_bf16mf4x5_mu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei32_v_bf16mf2x5_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei32_v_bf16mf2x5_mu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei32_v_bf16m1x5_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei32_v_bf16m1x5_mu(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg5ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg5ei64.c
new file mode 100644
index 0000000000000..da3efe94fafbf
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg5ei64.c
@@ -0,0 +1,163 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei64_v_bf16mf4x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei64_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei64_v_bf16mf2x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei64_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei64_v_bf16m1x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei64_v_bf16m1x5_tu(vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei64_v_bf16mf4x5_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei64_v_bf16mf4x5_tum(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei64_v_bf16mf2x5_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei64_v_bf16mf2x5_tum(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei64_v_bf16m1x5_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei64_v_bf16m1x5_tum(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei64_v_bf16mf4x5_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei64_v_bf16mf4x5_tumu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei64_v_bf16mf2x5_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei64_v_bf16mf2x5_tumu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei64_v_bf16m1x5_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei64_v_bf16m1x5_tumu(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei64_v_bf16mf4x5_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei64_v_bf16mf4x5_mu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei64_v_bf16mf2x5_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei64_v_bf16mf2x5_mu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei64_v_bf16m1x5_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei64_v_bf16m1x5_mu(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg5ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg5ei8.c
new file mode 100644
index 0000000000000..422a271e583aa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg5ei8.c
@@ -0,0 +1,160 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei8_v_bf16mf4x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei8_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei8_v_bf16mf2x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei8_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei8_v_bf16m1x5_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei8_v_bf16m1x5_tu(vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei8_v_bf16mf4x5_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei8_v_bf16mf4x5_tum(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei8_v_bf16mf2x5_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei8_v_bf16mf2x5_tum(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei8_v_bf16m1x5_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei8_v_bf16m1x5_tum(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei8_v_bf16mf4x5_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei8_v_bf16mf4x5_tumu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei8_v_bf16mf2x5_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei8_v_bf16mf2x5_tumu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei8_v_bf16m1x5_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei8_v_bf16m1x5_tumu(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @test_vluxseg5ei8_v_bf16mf4x5_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv2i8_5t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf4x5_t test_vluxseg5ei8_v_bf16mf4x5_mu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @test_vluxseg5ei8_v_bf16mf2x5_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv4i8_5t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 5) [[TMP0]]
+//
+vbfloat16mf2x5_t test_vluxseg5ei8_v_bf16mf2x5_mu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @test_vluxseg5ei8_v_bf16m1x5_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv8i8_5t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 5) [[TMP0]]
+//
+vbfloat16m1x5_t test_vluxseg5ei8_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg5ei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg6ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg6ei32.c
new file mode 100644
index 0000000000000..ecdd9cc8ff315
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg6ei32.c
@@ -0,0 +1,164 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei32_v_bf16mf4x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei32_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei32_v_bf16mf2x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei32_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei32_v_bf16m1x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei32_v_bf16m1x6_tu(vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei32_v_bf16mf4x6_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei32_v_bf16mf4x6_tum(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei32_v_bf16mf2x6_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei32_v_bf16mf2x6_tum(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei32_v_bf16m1x6_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei32_v_bf16m1x6_tum(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei32_v_bf16mf4x6_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei32_v_bf16mf4x6_tumu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei32_v_bf16mf2x6_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei32_v_bf16mf2x6_tumu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei32_v_bf16m1x6_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei32_v_bf16m1x6_tumu(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei32_v_bf16mf4x6_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei32_v_bf16mf4x6_mu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei32_v_bf16mf2x6_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei32_v_bf16mf2x6_mu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei32_v_bf16m1x6_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei32_v_bf16m1x6_mu(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg6ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg6ei64.c
new file mode 100644
index 0000000000000..d428a81cfddc8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg6ei64.c
@@ -0,0 +1,163 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei64_v_bf16mf4x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei64_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei64_v_bf16mf2x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei64_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei64_v_bf16m1x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei64_v_bf16m1x6_tu(vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei64_v_bf16mf4x6_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei64_v_bf16mf4x6_tum(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei64_v_bf16mf2x6_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei64_v_bf16mf2x6_tum(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei64_v_bf16m1x6_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei64_v_bf16m1x6_tum(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei64_v_bf16mf4x6_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei64_v_bf16mf4x6_tumu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei64_v_bf16mf2x6_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei64_v_bf16mf2x6_tumu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei64_v_bf16m1x6_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei64_v_bf16m1x6_tumu(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei64_v_bf16mf4x6_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei64_v_bf16mf4x6_mu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei64_v_bf16mf2x6_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei64_v_bf16mf2x6_mu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei64_v_bf16m1x6_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei64_v_bf16m1x6_mu(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg6ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg6ei8.c
new file mode 100644
index 0000000000000..cb38825634e43
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg6ei8.c
@@ -0,0 +1,160 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei8_v_bf16mf4x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei8_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei8_v_bf16mf2x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei8_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei8_v_bf16m1x6_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei8_v_bf16m1x6_tu(vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei8_v_bf16mf4x6_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei8_v_bf16mf4x6_tum(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei8_v_bf16mf2x6_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei8_v_bf16mf2x6_tum(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei8_v_bf16m1x6_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei8_v_bf16m1x6_tum(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei8_v_bf16mf4x6_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei8_v_bf16mf4x6_tumu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei8_v_bf16mf2x6_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei8_v_bf16mf2x6_tumu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei8_v_bf16m1x6_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei8_v_bf16m1x6_tumu(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @test_vluxseg6ei8_v_bf16mf4x6_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv2i8_6t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf4x6_t test_vluxseg6ei8_v_bf16mf4x6_mu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @test_vluxseg6ei8_v_bf16mf2x6_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv4i8_6t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 6) [[TMP0]]
+//
+vbfloat16mf2x6_t test_vluxseg6ei8_v_bf16mf2x6_mu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @test_vluxseg6ei8_v_bf16m1x6_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv8i8_6t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 6) [[TMP0]]
+//
+vbfloat16m1x6_t test_vluxseg6ei8_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg6ei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg7ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg7ei32.c
new file mode 100644
index 0000000000000..c446be91583f5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg7ei32.c
@@ -0,0 +1,164 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei32_v_bf16mf4x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei32_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei32_v_bf16mf2x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei32_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei32_v_bf16m1x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei32_v_bf16m1x7_tu(vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei32_v_bf16mf4x7_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei32_v_bf16mf4x7_tum(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei32_v_bf16mf2x7_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei32_v_bf16mf2x7_tum(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei32_v_bf16m1x7_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei32_v_bf16m1x7_tum(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei32_v_bf16mf4x7_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei32_v_bf16mf4x7_tumu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei32_v_bf16mf2x7_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei32_v_bf16mf2x7_tumu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei32_v_bf16m1x7_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei32_v_bf16m1x7_tumu(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei32_v_bf16mf4x7_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei32_v_bf16mf4x7_mu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei32_v_bf16mf2x7_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei32_v_bf16mf2x7_mu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei32_v_bf16m1x7_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei32_v_bf16m1x7_mu(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg7ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg7ei64.c
new file mode 100644
index 0000000000000..06f159dab8630
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg7ei64.c
@@ -0,0 +1,163 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei64_v_bf16mf4x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei64_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei64_v_bf16mf2x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei64_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei64_v_bf16m1x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei64_v_bf16m1x7_tu(vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei64_v_bf16mf4x7_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei64_v_bf16mf4x7_tum(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei64_v_bf16mf2x7_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei64_v_bf16mf2x7_tum(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei64_v_bf16m1x7_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei64_v_bf16m1x7_tum(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei64_v_bf16mf4x7_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei64_v_bf16mf4x7_tumu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei64_v_bf16mf2x7_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei64_v_bf16mf2x7_tumu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei64_v_bf16m1x7_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei64_v_bf16m1x7_tumu(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei64_v_bf16mf4x7_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei64_v_bf16mf4x7_mu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei64_v_bf16mf2x7_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei64_v_bf16mf2x7_mu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei64_v_bf16m1x7_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei64_v_bf16m1x7_mu(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg7ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg7ei8.c
new file mode 100644
index 0000000000000..8f91c7d642553
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg7ei8.c
@@ -0,0 +1,160 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei8_v_bf16mf4x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei8_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei8_v_bf16mf2x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei8_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei8_v_bf16m1x7_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei8_v_bf16m1x7_tu(vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei8_v_bf16mf4x7_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei8_v_bf16mf4x7_tum(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei8_v_bf16mf2x7_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei8_v_bf16mf2x7_tum(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei8_v_bf16m1x7_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei8_v_bf16m1x7_tum(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei8_v_bf16mf4x7_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei8_v_bf16mf4x7_tumu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei8_v_bf16mf2x7_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei8_v_bf16mf2x7_tumu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei8_v_bf16m1x7_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei8_v_bf16m1x7_tumu(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @test_vluxseg7ei8_v_bf16mf4x7_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv2i8_7t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf4x7_t test_vluxseg7ei8_v_bf16mf4x7_mu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @test_vluxseg7ei8_v_bf16mf2x7_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv4i8_7t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 7) [[TMP0]]
+//
+vbfloat16mf2x7_t test_vluxseg7ei8_v_bf16mf2x7_mu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @test_vluxseg7ei8_v_bf16m1x7_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv8i8_7t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 7) [[TMP0]]
+//
+vbfloat16m1x7_t test_vluxseg7ei8_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei8_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg8ei32.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg8ei32.c
new file mode 100644
index 0000000000000..0fef431e84c08
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg8ei32.c
@@ -0,0 +1,164 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_bf16mf4x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei32_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_bf16mf2x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei32_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_bf16m1x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei32_v_bf16m1x8_tu(vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_bf16mf4x8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei32_v_bf16mf4x8_tum(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_bf16mf2x8_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei32_v_bf16mf2x8_tum(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_bf16m1x8_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei32_v_bf16m1x8_tum(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_bf16mf4x8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei32_v_bf16mf4x8_tumu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_bf16mf2x8_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei32_v_bf16mf2x8_tumu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_bf16m1x8_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei32_v_bf16m1x8_tumu(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei32_v_bf16mf4x8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i32.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i32> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei32_v_bf16mf4x8_mu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint32mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei32_v_bf16mf2x8_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i32.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i32> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei32_v_bf16mf2x8_mu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint32m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei32_v_bf16m1x8_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i32> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i32.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i32> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei32_v_bf16m1x8_mu(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint32m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei32_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg8ei64.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg8ei64.c
new file mode 100644
index 0000000000000..5e941b53727de
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg8ei64.c
@@ -0,0 +1,163 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei64_v_bf16mf4x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei64_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei64_v_bf16mf2x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei64_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei64_v_bf16m1x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei64_v_bf16m1x8_tu(vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei64_v_bf16mf4x8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei64_v_bf16mf4x8_tum(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei64_v_bf16mf2x8_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei64_v_bf16mf2x8_tum(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei64_v_bf16m1x8_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei64_v_bf16m1x8_tum(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei64_v_bf16mf4x8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei64_v_bf16mf4x8_tumu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei64_v_bf16mf2x8_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei64_v_bf16mf2x8_tumu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2,
+ size_t vl) {
+ return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei64_v_bf16m1x8_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei64_v_bf16m1x8_tumu(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei64_v_bf16mf4x8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i64.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i64> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei64_v_bf16mf4x8_mu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint64m1_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei64_v_bf16mf2x8_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i64.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i64> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei64_v_bf16mf2x8_mu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint64m2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei64_v_bf16m1x8_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i64> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i64.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i64> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei64_v_bf16m1x8_mu(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint64m4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei64_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg8ei8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg8ei8.c
new file mode 100644
index 0000000000000..49cb2aa8921de
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfmin/policy/overloaded/vluxseg8ei8.c
@@ -0,0 +1,160 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
+// RUN: -target-feature +zvfbfmin -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei8_v_bf16mf4x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei8_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei8_v_bf16mf2x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei8_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei8_v_bf16m1x8_tu(
+// CHECK-RV64-SAME: target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], i64 [[VL]], i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei8_v_bf16m1x8_tu(vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_tu(vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei8_v_bf16mf4x8_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei8_v_bf16mf4x8_tum(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei8_v_bf16mf2x8_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei8_v_bf16mf2x8_tum(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei8_v_bf16m1x8_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei8_v_bf16m1x8_tum(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_tum(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei8_v_bf16mf4x8_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei8_v_bf16mf4x8_tumu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei8_v_bf16mf2x8_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei8_v_bf16mf2x8_tumu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei8_v_bf16m1x8_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei8_v_bf16m1x8_tumu(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_tumu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @test_vluxseg8ei8_v_bf16mf4x8_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 1 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 2 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv2i8_8t.p0.nxv1i8.nxv1i1.i64(target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 1 x i8> [[RS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 2 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf4x8_t test_vluxseg8ei8_v_bf16mf4x8_mu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf8_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @test_vluxseg8ei8_v_bf16mf2x8_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 2 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 4 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv4i8_8t.p0.nxv2i8.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 2 x i8> [[RS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 4 x i8>, 8) [[TMP0]]
+//
+vbfloat16mf2x8_t test_vluxseg8ei8_v_bf16mf2x8_mu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf4_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @test_vluxseg8ei8_v_bf16m1x8_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD:%.*]], ptr noundef [[RS1:%.*]], <vscale x 4 x i8> [[RS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv8i8_8t.p0.nxv4i8.nxv4i1.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[VD]], ptr [[RS1]], <vscale x 4 x i8> [[RS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1, i64 4)
+// CHECK-RV64-NEXT: ret target("riscv.vector.tuple", <vscale x 8 x i8>, 8) [[TMP0]]
+//
+vbfloat16m1x8_t test_vluxseg8ei8_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ vuint8mf2_t rs2, size_t vl) {
+ return __riscv_vluxseg8ei8_mu(vm, vd, rs1, rs2, vl);
+}
More information about the cfe-commits
mailing list