[llvm] 61a4637 - [RISCV][VP][NFC] Add tests for VP_SREM and VP_UREM
Fraser Cormack via llvm-commits
llvm-commits at lists.llvm.org
Wed May 5 05:21:06 PDT 2021
Author: Fraser Cormack
Date: 2021-05-05T13:13:34+01:00
New Revision: 61a46375a25b817da8657378b3fbd707a5e54771
URL: https://github.com/llvm/llvm-project/commit/61a46375a25b817da8657378b3fbd707a5e54771
DIFF: https://github.com/llvm/llvm-project/commit/61a46375a25b817da8657378b3fbd707a5e54771.diff
LOG: [RISCV][VP][NFC] Add tests for VP_SREM and VP_UREM
As agreed in D101826, these are follow-up tests for the RISC-V VP
support.
Added:
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
Modified:
Removed:
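(For context, every test in the added files follows the same basic shape: a vector-predicated remainder intrinsic taking the two operands, a mask, and an explicit vector length (EVL) operand. A minimal sketch of that shape, distilled from the tests in the diff below rather than quoted verbatim from the commit — the function name @example is purely illustrative:

declare <2 x i8> @llvm.vp.srem.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)

; Signed remainder of %va by %b, computed only in the lanes enabled by %m
; and only for the first %evl elements; with -mattr=+experimental-v this is
; expected to select a single masked vrem.vv, as the CHECK lines below show.
define <2 x i8> @example(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
  %v = call <2 x i8> @llvm.vp.srem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

The vremu files exercise the identical pattern with @llvm.vp.urem, and the "_vx_" variants splat a scalar divisor before the call.)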
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
new file mode 100644
index 000000000000..6e0cefe30624
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
@@ -0,0 +1,917 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <2 x i8> @llvm.vp.srem.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
+
+define <2 x i8> @vrem_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i8> @llvm.vp.srem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vrem_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.srem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vrem_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.srem.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vrem_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.srem.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+declare <4 x i8> @llvm.vp.srem.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)
+
+define <4 x i8> @vrem_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i8> @llvm.vp.srem.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vrem_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.srem.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vrem_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.srem.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vrem_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.srem.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+declare <8 x i8> @llvm.vp.srem.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
+
+define <8 x i8> @vrem_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i8> @llvm.vp.srem.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vrem_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.srem.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vrem_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.srem.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vrem_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.srem.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+declare <16 x i8> @llvm.vp.srem.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)
+
+define <16 x i8> @vrem_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i8> @llvm.vp.srem.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vrem_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.srem.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vrem_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.srem.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vrem_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.srem.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+declare <2 x i16> @llvm.vp.srem.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32)
+
+define <2 x i16> @vrem_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i16> @llvm.vp.srem.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vrem_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.srem.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vrem_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.srem.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vrem_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.srem.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+declare <4 x i16> @llvm.vp.srem.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32)
+
+define <4 x i16> @vrem_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i16> @llvm.vp.srem.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vrem_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.srem.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vrem_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.srem.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vrem_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.srem.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+declare <8 x i16> @llvm.vp.srem.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32)
+
+define <8 x i16> @vrem_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i16> @llvm.vp.srem.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vrem_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.srem.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vrem_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.srem.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vrem_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.srem.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+declare <16 x i16> @llvm.vp.srem.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32)
+
+define <16 x i16> @vrem_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i16> @llvm.vp.srem.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vrem_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.srem.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vrem_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.srem.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vrem_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.srem.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+declare <2 x i32> @llvm.vp.srem.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32)
+
+define <2 x i32> @vrem_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i32> @llvm.vp.srem.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vrem_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.srem.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vrem_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.srem.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vrem_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.srem.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+declare <4 x i32> @llvm.vp.srem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define <4 x i32> @vrem_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vrem_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vrem_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vrem_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+declare <8 x i32> @llvm.vp.srem.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
+
+define <8 x i32> @vrem_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i32> @llvm.vp.srem.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vrem_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.srem.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vrem_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.srem.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vrem_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.srem.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+declare <16 x i32> @llvm.vp.srem.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32)
+
+define <16 x i32> @vrem_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i32> @llvm.vp.srem.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vrem_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.srem.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vrem_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.srem.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vrem_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.srem.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+declare <2 x i64> @llvm.vp.srem.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32)
+
+define <2 x i64> @vrem_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i64> @llvm.vp.srem.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vrem_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.srem.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vrem_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_v2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 2, e64,m1,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v25, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m1,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v25, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_v2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.srem.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vrem_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_v2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 2, e64,m1,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v25, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m1,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v25
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_v2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.srem.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+declare <4 x i64> @llvm.vp.srem.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32)
+
+define <4 x i64> @vrem_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i64> @llvm.vp.srem.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vrem_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.srem.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vrem_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 4, e64,m2,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v26, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m2,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v26, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.srem.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vrem_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_v4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 4, e64,m2,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v26, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m2,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v26
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_v4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.srem.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+declare <8 x i64> @llvm.vp.srem.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32)
+
+define <8 x i64> @vrem_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i64> @llvm.vp.srem.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vrem_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.srem.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vrem_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_v8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 8, e64,m4,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v28, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m4,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v28, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_v8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.srem.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vrem_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_v8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 8, e64,m4,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v28, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m4,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v28
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_v8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.srem.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+declare <16 x i64> @llvm.vp.srem.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32)
+
+define <16 x i64> @vrem_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i64> @llvm.vp.srem.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vrem_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v16i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.srem.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vrem_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_v16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 16, e64,m8,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m8,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_v16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.srem.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vrem_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_v16i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 16, e64,m8,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m8,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_v16i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.srem.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
new file mode 100644
index 000000000000..b55ae466685a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
@@ -0,0 +1,917 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <2 x i8> @llvm.vp.urem.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
+
+define <2 x i8> @vremu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i8> @llvm.vp.urem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vremu_vv_v2i8_unmasked(<2 x i8> %va, <2 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.urem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vremu_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.urem.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vremu_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <2 x i8> %elt.head, <2 x i8> undef, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.urem.v2i8(<2 x i8> %va, <2 x i8> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+declare <4 x i8> @llvm.vp.urem.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)
+
+define <4 x i8> @vremu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i8> @llvm.vp.urem.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vremu_vv_v4i8_unmasked(<4 x i8> %va, <4 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.urem.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vremu_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.urem.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vremu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <4 x i8> %elt.head, <4 x i8> undef, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.urem.v4i8(<4 x i8> %va, <4 x i8> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+declare <8 x i8> @llvm.vp.urem.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
+
+define <8 x i8> @vremu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i8> @llvm.vp.urem.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vremu_vv_v8i8_unmasked(<8 x i8> %va, <8 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.urem.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vremu_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.urem.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vremu_vx_v8i8_unmasked(<8 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> undef, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.urem.v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+declare <16 x i8> @llvm.vp.urem.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)
+
+define <16 x i8> @vremu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i8> @llvm.vp.urem.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vremu_vv_v16i8_unmasked(<16 x i8> %va, <16 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.urem.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vremu_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.urem.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vremu_vx_v16i8_unmasked(<16 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <16 x i8> %elt.head, <16 x i8> undef, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.urem.v16i8(<16 x i8> %va, <16 x i8> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+declare <2 x i16> @llvm.vp.urem.v2i16(<2 x i16>, <2 x i16>, <2 x i1>, i32)
+
+define <2 x i16> @vremu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i16> @llvm.vp.urem.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vremu_vv_v2i16_unmasked(<2 x i16> %va, <2 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.urem.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vremu_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.urem.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vremu_vx_v2i16_unmasked(<2 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <2 x i16> %elt.head, <2 x i16> undef, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.urem.v2i16(<2 x i16> %va, <2 x i16> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+declare <4 x i16> @llvm.vp.urem.v4i16(<4 x i16>, <4 x i16>, <4 x i1>, i32)
+
+define <4 x i16> @vremu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i16> @llvm.vp.urem.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vremu_vv_v4i16_unmasked(<4 x i16> %va, <4 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.urem.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vremu_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.urem.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vremu_vx_v4i16_unmasked(<4 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <4 x i16> %elt.head, <4 x i16> undef, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.urem.v4i16(<4 x i16> %va, <4 x i16> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+declare <8 x i16> @llvm.vp.urem.v8i16(<8 x i16>, <8 x i16>, <8 x i1>, i32)
+
+define <8 x i16> @vremu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i16> @llvm.vp.urem.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vremu_vv_v8i16_unmasked(<8 x i16> %va, <8 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.urem.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vremu_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.urem.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vremu_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <8 x i16> %elt.head, <8 x i16> undef, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.urem.v8i16(<8 x i16> %va, <8 x i16> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+declare <16 x i16> @llvm.vp.urem.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32)
+
+define <16 x i16> @vremu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i16> @llvm.vp.urem.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vremu_vv_v16i16_unmasked(<16 x i16> %va, <16 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.urem.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vremu_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.urem.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vremu_vx_v16i16_unmasked(<16 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <16 x i16> %elt.head, <16 x i16> undef, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.urem.v16i16(<16 x i16> %va, <16 x i16> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+declare <2 x i32> @llvm.vp.urem.v2i32(<2 x i32>, <2 x i32>, <2 x i1>, i32)
+
+define <2 x i32> @vremu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i32> @llvm.vp.urem.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vremu_vv_v2i32_unmasked(<2 x i32> %va, <2 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.urem.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vremu_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.urem.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vremu_vx_v2i32_unmasked(<2 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <2 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <2 x i32> %elt.head, <2 x i32> undef, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.urem.v2i32(<2 x i32> %va, <2 x i32> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+declare <4 x i32> @llvm.vp.urem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define <4 x i32> @vremu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vremu_vv_v4i32_unmasked(<4 x i32> %va, <4 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vremu_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vremu_vx_v4i32_unmasked(<4 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <4 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <4 x i32> %elt.head, <4 x i32> undef, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+declare <8 x i32> @llvm.vp.urem.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
+
+define <8 x i32> @vremu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i32> @llvm.vp.urem.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vremu_vv_v8i32_unmasked(<8 x i32> %va, <8 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.urem.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vremu_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.urem.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vremu_vx_v8i32_unmasked(<8 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <8 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <8 x i32> %elt.head, <8 x i32> undef, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.urem.v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+declare <16 x i32> @llvm.vp.urem.v16i32(<16 x i32>, <16 x i32>, <16 x i1>, i32)
+
+define <16 x i32> @vremu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i32> @llvm.vp.urem.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vremu_vv_v16i32_unmasked(<16 x i32> %va, <16 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.urem.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vremu_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.urem.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vremu_vx_v16i32_unmasked(<16 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_v16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <16 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <16 x i32> %elt.head, <16 x i32> undef, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.urem.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+declare <2 x i64> @llvm.vp.urem.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32)
+
+define <2 x i64> @vremu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i64> @llvm.vp.urem.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vremu_vv_v2i64_unmasked(<2 x i64> %va, <2 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.urem.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vremu_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_v2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 2, e64,m1,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v25, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m1,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v25, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_v2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.urem.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vremu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_v2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 2, e64,m1,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v25, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m1,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v25
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_v2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <2 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <2 x i64> %elt.head, <2 x i64> undef, <2 x i32> zeroinitializer
+ %head = insertelement <2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.urem.v2i64(<2 x i64> %va, <2 x i64> %vb, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+declare <4 x i64> @llvm.vp.urem.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32)
+
+define <4 x i64> @vremu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i64> @llvm.vp.urem.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vremu_vv_v4i64_unmasked(<4 x i64> %va, <4 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.urem.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vremu_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 4, e64,m2,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v26, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m2,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v26, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.urem.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vremu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_v4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 4, e64,m2,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v26, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m2,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v26
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_v4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <4 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <4 x i64> %elt.head, <4 x i64> undef, <4 x i32> zeroinitializer
+ %head = insertelement <4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.urem.v4i64(<4 x i64> %va, <4 x i64> %vb, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+declare <8 x i64> @llvm.vp.urem.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32)
+
+define <8 x i64> @vremu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i64> @llvm.vp.urem.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vremu_vv_v8i64_unmasked(<8 x i64> %va, <8 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.urem.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vremu_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_v8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 8, e64,m4,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v28, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m4,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v28, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_v8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.urem.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vremu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_v8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 8, e64,m4,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v28, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m4,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v28
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_v8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <8 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <8 x i64> %elt.head, <8 x i64> undef, <8 x i32> zeroinitializer
+ %head = insertelement <8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.urem.v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+declare <16 x i64> @llvm.vp.urem.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32)
+
+define <16 x i64> @vremu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v16i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i64> @llvm.vp.urem.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vremu_vv_v16i64_unmasked(<16 x i64> %va, <16 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v16i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.urem.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vremu_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_v16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 16, e64,m8,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m8,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_v16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.urem.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vremu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_v16i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetivli a0, 16, e64,m8,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m8,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_v16i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <16 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <16 x i64> %elt.head, <16 x i64> undef, <16 x i32> zeroinitializer
+ %head = insertelement <16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.urem.v16i64(<16 x i64> %va, <16 x i64> %vb, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
new file mode 100644
index 000000000000..c1c67f4a7a5d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
@@ -0,0 +1,1217 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @vrem_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vrem_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vrem_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vrem_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+declare <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vrem_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vrem_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vrem_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vrem_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @vrem_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vrem_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vrem_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vrem_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+declare <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @vrem_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vrem_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vrem_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vrem_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+declare <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @vrem_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vrem_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vrem_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vrem_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+declare <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i8> @vrem_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vrem_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vrem_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vrem_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+declare <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
+
+define <vscale x 64 x i8> @vrem_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vrem_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vrem_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vrem_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+ %head = insertelement <vscale x 64 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+declare <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @vrem_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vrem_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vrem_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vrem_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vrem_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vrem_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vrem_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vrem_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @vrem_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vrem_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vrem_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vrem_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+declare <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @vrem_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vrem_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vrem_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vrem_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+declare <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i16> @vrem_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vrem_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vrem_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vrem_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+declare <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i16> @vrem_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vrem_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vrem_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vrem_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+declare <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @vrem_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vrem_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vrem_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vrem_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vrem_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vrem_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vrem_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vrem_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @vrem_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vrem_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vrem_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vrem_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+declare <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @vrem_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vrem_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vrem_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vrem_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+declare <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i32> @vrem_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vrem_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vrem_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vrem_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vx_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vrem.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+declare <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i64> @vrem_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vrem_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv1i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vrem_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_nxv1i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m1,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v25, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m1,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v25, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_nxv1i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vrem_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_nxv1i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m1,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v25, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m1,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v25
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_nxv1i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vrem_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vrem_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vrem_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_nxv2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m2,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v26, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m2,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v26, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_nxv2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vrem_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_nxv2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m2,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v26, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m2,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v26
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_nxv2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+declare <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i64> @vrem_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vrem_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vrem_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_nxv4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m4,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v28, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m4,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v28, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_nxv4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vrem_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_nxv4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m4,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v28, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m4,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v28
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_nxv4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+declare <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i64> @vrem_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vrem_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vrem.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vrem_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_nxv8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m8,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_nxv8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vrem_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vrem_vx_nxv8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m8,ta,mu
+; RV32-NEXT: vrem.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrem_vx_nxv8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; RV64-NEXT: vrem.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
new file mode 100644
index 000000000000..5258511da3cc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
@@ -0,0 +1,1217 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @vremu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vremu_vv_nxv1i8_unmasked(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vremu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+define <vscale x 1 x i8> @vremu_vx_nxv1i8_unmasked(<vscale x 1 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i8> %elt.head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+declare <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vremu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vremu_vv_nxv2i8_unmasked(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vremu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vremu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i8> %elt.head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @vremu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vremu_vv_nxv4i8_unmasked(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vremu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vremu_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i8> %elt.head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+declare <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @vremu_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vremu_vv_nxv8i8_unmasked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vremu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vremu_vx_nxv8i8_unmasked(<vscale x 8 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i8> %elt.head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+declare <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @vremu_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vremu_vv_nxv16i8_unmasked(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vremu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vremu_vx_nxv16i8_unmasked(<vscale x 16 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i8> %elt.head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+declare <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i8> @vremu_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vremu_vv_nxv32i8_unmasked(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vremu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vremu_vx_nxv32i8_unmasked(<vscale x 32 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i8> %elt.head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+declare <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
+
+define <vscale x 64 x i8> @vremu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vremu_vv_nxv64i8_unmasked(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vremu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vremu_vx_nxv64i8_unmasked(<vscale x 64 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 64 x i8> %elt.head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
+ %head = insertelement <vscale x 64 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+declare <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @vremu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vremu_vv_nxv1i16_unmasked(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vremu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vremu_vx_nxv1i16_unmasked(<vscale x 1 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv1i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i16> %elt.head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vremu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vremu_vv_nxv2i16_unmasked(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vremu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vremu_vx_nxv2i16_unmasked(<vscale x 2 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv2i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i16> %elt.head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @vremu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vremu_vv_nxv4i16_unmasked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vremu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv4i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vremu_vx_nxv4i16_unmasked(<vscale x 4 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv4i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i16> %elt.head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+declare <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @vremu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vremu_vv_nxv8i16_unmasked(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vremu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vremu_vx_nxv8i16_unmasked(<vscale x 8 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv8i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i16> %elt.head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+declare <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i16> @vremu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vremu_vv_nxv16i16_unmasked(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vremu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv16i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vremu_vx_nxv16i16_unmasked(<vscale x 16 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv16i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i16> %elt.head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+declare <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i16> @vremu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vremu_vv_nxv32i16_unmasked(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vremu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv32i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vremu_vx_nxv32i16_unmasked(<vscale x 32 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv32i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 32 x i16> %elt.head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+declare <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @vremu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vremu_vv_nxv1i32_unmasked(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vremu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vremu_vx_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv1i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i32> %elt.head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vremu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vremu_vv_nxv2i32_unmasked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vremu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv2i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vremu_vx_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv2i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i32> %elt.head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @vremu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vremu_vv_nxv4i32_unmasked(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vremu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv4i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vremu_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv4i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i32> %elt.head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+declare <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @vremu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vremu_vv_nxv8i32_unmasked(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vremu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vremu_vx_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv8i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i32> %elt.head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+declare <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i32> @vremu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vremu_vv_nxv16i32_unmasked(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vremu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv16i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vremu_vx_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vx_nxv16i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vremu.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 16 x i32> %elt.head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+declare <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i64> @vremu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv1i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vremu_vv_nxv1i64_unmasked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv1i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vremu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_nxv1i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m1,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v25, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m1,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v25, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_nxv1i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vremu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_nxv1i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m1,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v25, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m1,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v25
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_nxv1i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 1 x i64> %elt.head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i64> @vremu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vremu_vv_nxv2i64_unmasked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv2i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vremu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_nxv2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m2,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v26, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m2,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v26, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_nxv2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vremu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_nxv2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m2,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v26, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m2,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v26
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_nxv2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 2 x i64> %elt.head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+declare <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i64> @vremu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv4i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vremu_vv_nxv4i64_unmasked(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv4i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vremu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_nxv4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m4,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v28, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m4,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v28, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_nxv4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vremu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_nxv4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m4,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v28, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m4,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v28
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_nxv4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 4 x i64> %elt.head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+declare <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i64> @vremu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vremu_vv_nxv8i64_unmasked(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv8i64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vremu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vremu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_nxv8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m8,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_nxv8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vremu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %b, i32 zeroext %evl) {
+; RV32-LABEL: vremu_vx_nxv8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsetvli a0, a2, e64,m8,ta,mu
+; RV32-NEXT: vremu.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vremu_vx_nxv8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; RV64-NEXT: vremu.vx v8, v8, a0
+; RV64-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %vb = shufflevector <vscale x 8 x i64> %elt.head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}