[llvm] 45d3ed0 - [RISCV] Add support for scalable vector masked load/store.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 12 10:49:21 PST 2021


Author: Craig Topper
Date: 2021-03-12T10:32:33-08:00
New Revision: 45d3ed0304441983f217a6b696295fc37d9fe2b6

URL: https://github.com/llvm/llvm-project/commit/45d3ed0304441983f217a6b696295fc37d9fe2b6
DIFF: https://github.com/llvm/llvm-project/commit/45d3ed0304441983f217a6b696295fc37d9fe2b6.diff

LOG: [RISCV] Add support for scalable vector masked load/store.
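
For example, a masked load of a scalable vector type is now selected to a
masked unit-stride vector load. The IR and output below are taken from the
new masked-load-int.ll test added by this patch:

  define <vscale x 1 x i64> @masked_load_nxv1i64(<vscale x 1 x i64>* %a, <vscale x 1 x i1> %mask) nounwind {
    %load = call <vscale x 1 x i64> @llvm.masked.load.nxv1i64(<vscale x 1 x i64>* %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x i64> undef)
    ret <vscale x 1 x i64> %load
  }
  declare <vscale x 1 x i64> @llvm.masked.load.nxv1i64(<vscale x 1 x i64>*, i32, <vscale x 1 x i1>, <vscale x 1 x i64>)

now compiles (llc -mtriple=riscv64 -mattr=+experimental-v) to:

  vsetvli a1, zero, e64,m1,ta,mu
  vle64.v v8, (a0), v0.t
  ret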

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D98460

Added: 
    llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll
    llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
    llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll
    llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index eaa404fa3be81..d847296e7e258 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -33,6 +33,21 @@ def SplatPat       : ComplexPattern<vAny, 1, "selectVSplat",      [splat_vector,
 def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [splat_vector, rv32_splat_i64], [], 2>;
 def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5", [splat_vector, rv32_splat_i64], [], 2>;
 
+def masked_load :
+  PatFrag<(ops node:$ptr, node:$mask, node:$maskedoff),
+          (masked_ld node:$ptr, undef, node:$mask, node:$maskedoff), [{
+  return !cast<MaskedLoadSDNode>(N)->isExpandingLoad() &&
+    cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
+    cast<MaskedLoadSDNode>(N)->isUnindexed();
+}]>;
+def masked_store :
+  PatFrag<(ops node:$val, node:$ptr, node:$mask),
+          (masked_st node:$val, node:$ptr, undef, node:$mask), [{
+  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
+         !cast<MaskedStoreSDNode>(N)->isCompressingStore() &&
+         cast<MaskedStoreSDNode>(N)->isUnindexed();
+}]>;
+
 class SwapHelper<dag Prefix, dag A, dag B, dag Suffix, bit swap> {
    dag Value = !con(Prefix, !if(swap, B, A), !if(swap, A, B), Suffix);
 }
@@ -53,6 +68,25 @@ multiclass VPatUSLoadStoreSDNode<ValueType type,
             (store_instr reg_class:$rs2, BaseAddr:$rs1, avl, sew)>;
 }
 
+multiclass VPatUSLoadStoreSDNodeMask<ValueType type,
+                                     ValueType mask_type,
+                                     int sew,
+                                     LMULInfo vlmul,
+                                     OutPatFrag avl,
+                                     VReg reg_class>
+{
+  defvar load_instr = !cast<Instruction>("PseudoVLE"#sew#"_V_"#vlmul.MX#"_MASK");
+  defvar store_instr = !cast<Instruction>("PseudoVSE"#sew#"_V_"#vlmul.MX#"_MASK");
+  // Load
+  def : Pat<(type (masked_load BaseAddr:$rs1, (mask_type V0), type:$merge)),
+            (load_instr reg_class:$merge, BaseAddr:$rs1, (mask_type V0),
+                        avl, sew)>;
+  // Store
+  def : Pat<(masked_store type:$rs2, BaseAddr:$rs1, (mask_type V0)),
+            (store_instr reg_class:$rs2, BaseAddr:$rs1, (mask_type V0),
+                         avl, sew)>;
+}
+
 multiclass VPatUSLoadStoreWholeVRSDNode<ValueType type,
                                         int sew,
                                         LMULInfo vlmul,
@@ -360,6 +394,9 @@ foreach vti = !listconcat(FractionalGroupIntegerVectors,
                           FractionalGroupFloatVectors) in
   defm "" : VPatUSLoadStoreSDNode<vti.Vector, vti.SEW, vti.LMul,
                                   vti.AVL, vti.RegClass>;
+foreach vti = AllVectors in
+  defm "" : VPatUSLoadStoreSDNodeMask<vti.Vector, vti.Mask, vti.SEW, vti.LMul,
+                                      vti.AVL, vti.RegClass>;
 foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VF16M1, VF32M1, VF64M1] in
   defm "" : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.SEW, vti.LMul,
                                          vti.RegClass>;

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll
new file mode 100644
index 0000000000000..85a7cd023f2d2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll
@@ -0,0 +1,168 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x half> @masked_load_nxv1f16(<vscale x 1 x half>* %a, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 1 x half> @llvm.masked.load.nxv1f16(<vscale x 1 x half>* %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x half> undef)
+  ret <vscale x 1 x half> %load
+}
+declare <vscale x 1 x half> @llvm.masked.load.nxv1f16(<vscale x 1 x half>*, i32, <vscale x 1 x i1>, <vscale x 1 x half>)
+
+define <vscale x 1 x float> @masked_load_nxv1f32(<vscale x 1 x float>* %a, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 1 x float> @llvm.masked.load.nxv1f32(<vscale x 1 x float>* %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x float> undef)
+  ret <vscale x 1 x float> %load
+}
+declare <vscale x 1 x float> @llvm.masked.load.nxv1f32(<vscale x 1 x float>*, i32, <vscale x 1 x i1>, <vscale x 1 x float>)
+
+define <vscale x 1 x double> @masked_load_nxv1f64(<vscale x 1 x double>* %a, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 1 x double> @llvm.masked.load.nxv1f64(<vscale x 1 x double>* %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x double> undef)
+  ret <vscale x 1 x double> %load
+}
+declare <vscale x 1 x double> @llvm.masked.load.nxv1f64(<vscale x 1 x double>*, i32, <vscale x 1 x i1>, <vscale x 1 x double>)
+
+define <vscale x 2 x half> @masked_load_nxv2f16(<vscale x 2 x half>* %a, <vscale x 2 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>* %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
+  ret <vscale x 2 x half> %load
+}
+declare <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>*, i32, <vscale x 2 x i1>, <vscale x 2 x half>)
+
+define <vscale x 2 x float> @masked_load_nxv2f32(<vscale x 2 x float>* %a, <vscale x 2 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>* %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
+  ret <vscale x 2 x float> %load
+}
+declare <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>*, i32, <vscale x 2 x i1>, <vscale x 2 x float>)
+
+define <vscale x 2 x double> @masked_load_nxv2f64(<vscale x 2 x double>* %a, <vscale x 2 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>* %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
+  ret <vscale x 2 x double> %load
+}
+declare <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>*, i32, <vscale x 2 x i1>, <vscale x 2 x double>)
+
+define <vscale x 4 x half> @masked_load_nxv4f16(<vscale x 4 x half>* %a, <vscale x 4 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>* %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
+  ret <vscale x 4 x half> %load
+}
+declare <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>*, i32, <vscale x 4 x i1>, <vscale x 4 x half>)
+
+define <vscale x 4 x float> @masked_load_nxv4f32(<vscale x 4 x float>* %a, <vscale x 4 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>* %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
+  ret <vscale x 4 x float> %load
+}
+declare <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>*, i32, <vscale x 4 x i1>, <vscale x 4 x float>)
+
+define <vscale x 4 x double> @masked_load_nxv4f64(<vscale x 4 x double>* %a, <vscale x 4 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 4 x double> @llvm.masked.load.nxv4f64(<vscale x 4 x double>* %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x double> undef)
+  ret <vscale x 4 x double> %load
+}
+declare <vscale x 4 x double> @llvm.masked.load.nxv4f64(<vscale x 4 x double>*, i32, <vscale x 4 x i1>, <vscale x 4 x double>)
+
+define <vscale x 8 x half> @masked_load_nxv8f16(<vscale x 8 x half>* %a, <vscale x 8 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>* %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
+  ret <vscale x 8 x half> %load
+}
+declare <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>*, i32, <vscale x 8 x i1>, <vscale x 8 x half>)
+
+define <vscale x 8 x float> @masked_load_nxv8f32(<vscale x 8 x float>* %a, <vscale x 8 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 8 x float> @llvm.masked.load.nxv8f32(<vscale x 8 x float>* %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x float> undef)
+  ret <vscale x 8 x float> %load
+}
+declare <vscale x 8 x float> @llvm.masked.load.nxv8f32(<vscale x 8 x float>*, i32, <vscale x 8 x i1>, <vscale x 8 x float>)
+
+define <vscale x 8 x double> @masked_load_nxv8f64(<vscale x 8 x double>* %a, <vscale x 8 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 8 x double> @llvm.masked.load.nxv8f64(<vscale x 8 x double>* %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x double> undef)
+  ret <vscale x 8 x double> %load
+}
+declare <vscale x 8 x double> @llvm.masked.load.nxv8f64(<vscale x 8 x double>*, i32, <vscale x 8 x i1>, <vscale x 8 x double>)
+
+define <vscale x 16 x half> @masked_load_nxv16f16(<vscale x 16 x half>* %a, <vscale x 16 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 16 x half> @llvm.masked.load.nxv16f16(<vscale x 16 x half>* %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x half> undef)
+  ret <vscale x 16 x half> %load
+}
+declare <vscale x 16 x half> @llvm.masked.load.nxv16f16(<vscale x 16 x half>*, i32, <vscale x 16 x i1>, <vscale x 16 x half>)
+
+define <vscale x 16 x float> @masked_load_nxv16f32(<vscale x 16 x float>* %a, <vscale x 16 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 16 x float> @llvm.masked.load.nxv16f32(<vscale x 16 x float>* %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x float> undef)
+  ret <vscale x 16 x float> %load
+}
+declare <vscale x 16 x float> @llvm.masked.load.nxv16f32(<vscale x 16 x float>*, i32, <vscale x 16 x i1>, <vscale x 16 x float>)
+
+define <vscale x 32 x half> @masked_load_nxv32f16(<vscale x 32 x half>* %a, <vscale x 32 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 32 x half> @llvm.masked.load.nxv32f16(<vscale x 32 x half>* %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x half> undef)
+  ret <vscale x 32 x half> %load
+}
+declare <vscale x 32 x half> @llvm.masked.load.nxv32f16(<vscale x 32 x half>*, i32, <vscale x 32 x i1>, <vscale x 32 x half>)

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
new file mode 100644
index 0000000000000..e907f675689a0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll
@@ -0,0 +1,245 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @masked_load_nxv1i8(<vscale x 1 x i8>* %a, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 1 x i8> @llvm.masked.load.nxv1i8(<vscale x 1 x i8>* %a, i32 1, <vscale x 1 x i1> %mask, <vscale x 1 x i8> undef)
+  ret <vscale x 1 x i8> %load
+}
+declare <vscale x 1 x i8> @llvm.masked.load.nxv1i8(<vscale x 1 x i8>*, i32, <vscale x 1 x i1>, <vscale x 1 x i8>)
+
+define <vscale x 1 x i16> @masked_load_nxv1i16(<vscale x 1 x i16>* %a, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 1 x i16> @llvm.masked.load.nxv1i16(<vscale x 1 x i16>* %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x i16> undef)
+  ret <vscale x 1 x i16> %load
+}
+declare <vscale x 1 x i16> @llvm.masked.load.nxv1i16(<vscale x 1 x i16>*, i32, <vscale x 1 x i1>, <vscale x 1 x i16>)
+
+define <vscale x 1 x i32> @masked_load_nxv1i32(<vscale x 1 x i32>* %a, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 1 x i32> @llvm.masked.load.nxv1i32(<vscale x 1 x i32>* %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x i32> undef)
+  ret <vscale x 1 x i32> %load
+}
+declare <vscale x 1 x i32> @llvm.masked.load.nxv1i32(<vscale x 1 x i32>*, i32, <vscale x 1 x i1>, <vscale x 1 x i32>)
+
+define <vscale x 1 x i64> @masked_load_nxv1i64(<vscale x 1 x i64>* %a, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 1 x i64> @llvm.masked.load.nxv1i64(<vscale x 1 x i64>* %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x i64> undef)
+  ret <vscale x 1 x i64> %load
+}
+declare <vscale x 1 x i64> @llvm.masked.load.nxv1i64(<vscale x 1 x i64>*, i32, <vscale x 1 x i1>, <vscale x 1 x i64>)
+
+define <vscale x 2 x i8> @masked_load_nxv2i8(<vscale x 2 x i8>* %a, <vscale x 2 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
+  ret <vscale x 2 x i8> %load
+}
+declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>*, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
+
+define <vscale x 2 x i16> @masked_load_nxv2i16(<vscale x 2 x i16>* %a, <vscale x 2 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
+  ret <vscale x 2 x i16> %load
+}
+declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>*, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
+
+define <vscale x 2 x i32> @masked_load_nxv2i32(<vscale x 2 x i32>* %a, <vscale x 2 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
+  ret <vscale x 2 x i32> %load
+}
+declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>*, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
+
+define <vscale x 2 x i64> @masked_load_nxv2i64(<vscale x 2 x i64>* %a, <vscale x 2 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>* %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
+  ret <vscale x 2 x i64> %load
+}
+declare <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>*, i32, <vscale x 2 x i1>, <vscale x 2 x i64>)
+
+define <vscale x 4 x i8> @masked_load_nxv4i8(<vscale x 4 x i8>* %a, <vscale x 4 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
+  ret <vscale x 4 x i8> %load
+}
+declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>*, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
+
+define <vscale x 4 x i16> @masked_load_nxv4i16(<vscale x 4 x i16>* %a, <vscale x 4 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
+  ret <vscale x 4 x i16> %load
+}
+declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>*, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
+
+define <vscale x 4 x i32> @masked_load_nxv4i32(<vscale x 4 x i32>* %a, <vscale x 4 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>* %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
+  ret <vscale x 4 x i32> %load
+}
+declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>*, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)
+
+define <vscale x 4 x i64> @masked_load_nxv4i64(<vscale x 4 x i64>* %a, <vscale x 4 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64(<vscale x 4 x i64>* %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x i64> undef)
+  ret <vscale x 4 x i64> %load
+}
+declare <vscale x 4 x i64> @llvm.masked.load.nxv4i64(<vscale x 4 x i64>*, i32, <vscale x 4 x i1>, <vscale x 4 x i64>)
+
+define <vscale x 8 x i8> @masked_load_nxv8i8(<vscale x 8 x i8>* %a, <vscale x 8 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
+  ret <vscale x 8 x i8> %load
+}
+declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>*, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)
+
+define <vscale x 8 x i16> @masked_load_nxv8i16(<vscale x 8 x i16>* %a, <vscale x 8 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>* %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
+  ret <vscale x 8 x i16> %load
+}
+declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>*, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
+
+define <vscale x 8 x i32> @masked_load_nxv8i32(<vscale x 8 x i32>* %a, <vscale x 8 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32>* %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x i32> undef)
+  ret <vscale x 8 x i32> %load
+}
+declare <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32>*, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)
+
+define <vscale x 8 x i64> @masked_load_nxv8i64(<vscale x 8 x i64>* %a, <vscale x 8 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,tu,mu
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64>* %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
+  ret <vscale x 8 x i64> %load
+}
+declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64>*, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)
+
+define <vscale x 16 x i8> @masked_load_nxv16i8(<vscale x 16 x i8>* %a, <vscale x 16 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>* %a, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
+  ret <vscale x 16 x i8> %load
+}
+declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>*, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
+
+define <vscale x 16 x i16> @masked_load_nxv16i16(<vscale x 16 x i16>* %a, <vscale x 16 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 16 x i16> @llvm.masked.load.nxv16i16(<vscale x 16 x i16>* %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i16> undef)
+  ret <vscale x 16 x i16> %load
+}
+declare <vscale x 16 x i16> @llvm.masked.load.nxv16i16(<vscale x 16 x i16>*, i32, <vscale x 16 x i1>, <vscale x 16 x i16>)
+
+define <vscale x 16 x i32> @masked_load_nxv16i32(<vscale x 16 x i32>* %a, <vscale x 16 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 16 x i32> @llvm.masked.load.nxv16i32(<vscale x 16 x i32>* %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x i32> undef)
+  ret <vscale x 16 x i32> %load
+}
+declare <vscale x 16 x i32> @llvm.masked.load.nxv16i32(<vscale x 16 x i32>*, i32, <vscale x 16 x i1>, <vscale x 16 x i32>)
+
+define <vscale x 32 x i8> @masked_load_nxv32i8(<vscale x 32 x i8>* %a, <vscale x 32 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8>* %a, i32 1, <vscale x 32 x i1> %mask, <vscale x 32 x i8> undef)
+  ret <vscale x 32 x i8> %load
+}
+declare <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8>*, i32, <vscale x 32 x i1>, <vscale x 32 x i8>)
+
+define <vscale x 32 x i16> @masked_load_nxv32i16(<vscale x 32 x i16>* %a, <vscale x 32 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,tu,mu
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16>* %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x i16> undef)
+  ret <vscale x 32 x i16> %load
+}
+declare <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16>*, i32, <vscale x 32 x i1>, <vscale x 32 x i16>)
+
+define <vscale x 64 x i8> @masked_load_nxv64i8(<vscale x 64 x i8>* %a, <vscale x 64 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_load_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,tu,mu
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8(<vscale x 64 x i8>* %a, i32 1, <vscale x 64 x i1> %mask, <vscale x 64 x i8> undef)
+  ret <vscale x 64 x i8> %load
+}
+declare <vscale x 64 x i8> @llvm.masked.load.nxv64i8(<vscale x 64 x i8>*, i32, <vscale x 64 x i1>, <vscale x 64 x i8>)

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll
new file mode 100644
index 0000000000000..8f35137536571
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-store-fp.ll
@@ -0,0 +1,168 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s
+
+define void @masked_store_nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half>* %a, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv1f16.p0nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half>* %a, i32 2, <vscale x 1 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv1f16.p0nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>*, i32, <vscale x 1 x i1>)
+
+define void @masked_store_nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float>* %a, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv1f32.p0nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float>* %a, i32 4, <vscale x 1 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv1f32.p0nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>*, i32, <vscale x 1 x i1>)
+
+define void @masked_store_nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double>* %a, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv1f64.p0nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double>* %a, i32 8, <vscale x 1 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv1f64.p0nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>*, i32, <vscale x 1 x i1>)
+
+define void @masked_store_nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half>* %a, <vscale x 2 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv2f16.p0nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half>* %a, i32 2, <vscale x 2 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv2f16.p0nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>*, i32, <vscale x 2 x i1>)
+
+define void @masked_store_nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float>* %a, <vscale x 2 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv2f32.p0nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float>* %a, i32 4, <vscale x 2 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv2f32.p0nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>*, i32, <vscale x 2 x i1>)
+
+define void @masked_store_nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double>* %a, <vscale x 2 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv2f64.p0nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double>* %a, i32 8, <vscale x 2 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv2f64.p0nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>*, i32, <vscale x 2 x i1>)
+
+define void @masked_store_nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half>* %a, <vscale x 4 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv4f16.p0nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half>* %a, i32 2, <vscale x 4 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv4f16.p0nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>*, i32, <vscale x 4 x i1>)
+
+define void @masked_store_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float>* %a, <vscale x 4 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv4f32.p0nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float>* %a, i32 4, <vscale x 4 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv4f32.p0nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>*, i32, <vscale x 4 x i1>)
+
+define void @masked_store_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double>* %a, <vscale x 4 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv4f64.p0nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double>* %a, i32 8, <vscale x 4 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv4f64.p0nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>*, i32, <vscale x 4 x i1>)
+
+define void @masked_store_nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half>* %a, <vscale x 8 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv8f16.p0nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half>* %a, i32 2, <vscale x 8 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv8f16.p0nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>*, i32, <vscale x 8 x i1>)
+
+define void @masked_store_nxv8f32(<vscale x 8 x float> %val, <vscale x 8 x float>* %a, <vscale x 8 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv8f32.p0nxv8f32(<vscale x 8 x float> %val, <vscale x 8 x float>* %a, i32 4, <vscale x 8 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv8f32.p0nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>*, i32, <vscale x 8 x i1>)
+
+define void @masked_store_nxv8f64(<vscale x 8 x double> %val, <vscale x 8 x double>* %a, <vscale x 8 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv8f64.p0nxv8f64(<vscale x 8 x double> %val, <vscale x 8 x double>* %a, i32 8, <vscale x 8 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv8f64.p0nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>*, i32, <vscale x 8 x i1>)
+
+define void @masked_store_nxv16f16(<vscale x 16 x half> %val, <vscale x 16 x half>* %a, <vscale x 16 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv16f16.p0nxv16f16(<vscale x 16 x half> %val, <vscale x 16 x half>* %a, i32 2, <vscale x 16 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv16f16.p0nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>*, i32, <vscale x 16 x i1>)
+
+define void @masked_store_nxv16f32(<vscale x 16 x float> %val, <vscale x 16 x float>* %a, <vscale x 16 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv16f32.p0nxv16f32(<vscale x 16 x float> %val, <vscale x 16 x float>* %a, i32 4, <vscale x 16 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv16f32.p0nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>*, i32, <vscale x 16 x i1>)
+
+define void @masked_store_nxv32f16(<vscale x 32 x half> %val, <vscale x 32 x half>* %a, <vscale x 32 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.nxv32f16.p0nxv32f16(<vscale x 32 x half> %val, <vscale x 32 x half>* %a, i32 2, <vscale x 32 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.nxv32f16.p0nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>*, i32, <vscale x 32 x i1>)

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll
new file mode 100644
index 0000000000000..2a10898d449d5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-store-int.ll
@@ -0,0 +1,245 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define void @masked_store_nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %a, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vse8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v1i8.p0v1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %a, i32 1, <vscale x 1 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v1i8.p0v1i8(<vscale x 1 x i8>, <vscale x 1 x i8>*, i32, <vscale x 1 x i1>)
+
+define void @masked_store_nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16>* %a, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v1i16.p0v1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16>* %a, i32 2, <vscale x 1 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v1i16.p0v1i16(<vscale x 1 x i16>, <vscale x 1 x i16>*, i32, <vscale x 1 x i1>)
+
+define void @masked_store_nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32>* %a, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v1i32.p0v1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32>* %a, i32 4, <vscale x 1 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v1i32.p0v1i32(<vscale x 1 x i32>, <vscale x 1 x i32>*, i32, <vscale x 1 x i1>)
+
+define void @masked_store_nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64>* %a, <vscale x 1 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v1i64.p0v1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64>* %a, i32 8, <vscale x 1 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v1i64.p0v1i64(<vscale x 1 x i64>, <vscale x 1 x i64>*, i32, <vscale x 1 x i1>)
+
+define void @masked_store_nxv2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %a, <vscale x 2 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vse8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v2i8.p0v2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v2i8.p0v2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1>)
+
+define void @masked_store_nxv2i16(<vscale x 2 x i16> %val, <vscale x 2 x i16>* %a, <vscale x 2 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v2i16.p0v2i16(<vscale x 2 x i16> %val, <vscale x 2 x i16>* %a, i32 2, <vscale x 2 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v2i16.p0v2i16(<vscale x 2 x i16>, <vscale x 2 x i16>*, i32, <vscale x 2 x i1>)
+
+define void @masked_store_nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32>* %a, <vscale x 2 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v2i32.p0v2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32>* %a, i32 4, <vscale x 2 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v2i32.p0v2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i32, <vscale x 2 x i1>)
+
+define void @masked_store_nxv2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64>* %a, <vscale x 2 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v2i64.p0v2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64>* %a, i32 8, <vscale x 2 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v2i64.p0v2i64(<vscale x 2 x i64>, <vscale x 2 x i64>*, i32, <vscale x 2 x i1>)
+
+define void @masked_store_nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8>* %a, <vscale x 4 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vse8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v4i8.p0v4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8>* %a, i32 1, <vscale x 4 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v4i8.p0v4i8(<vscale x 4 x i8>, <vscale x 4 x i8>*, i32, <vscale x 4 x i1>)
+
+define void @masked_store_nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16>* %a, <vscale x 4 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v4i16.p0v4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16>* %a, i32 2, <vscale x 4 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v4i16.p0v4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, i32, <vscale x 4 x i1>)
+
+define void @masked_store_nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32>* %a, <vscale x 4 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v4i32.p0v4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32>* %a, i32 4, <vscale x 4 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v4i32.p0v4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32, <vscale x 4 x i1>)
+
+define void @masked_store_nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64>* %a, <vscale x 4 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v4i64.p0v4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64>* %a, i32 8, <vscale x 4 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v4i64.p0v4i64(<vscale x 4 x i64>, <vscale x 4 x i64>*, i32, <vscale x 4 x i1>)
+
+define void @masked_store_nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8>* %a, <vscale x 8 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vse8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v8i8.p0v8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8>* %a, i32 1, <vscale x 8 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v8i8.p0v8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32, <vscale x 8 x i1>)
+
+define void @masked_store_nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16>* %a, <vscale x 8 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v8i16.p0v8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16>* %a, i32 2, <vscale x 8 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v8i16.p0v8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32, <vscale x 8 x i1>)
+
+define void @masked_store_nxv8i32(<vscale x 8 x i32> %val, <vscale x 8 x i32>* %a, <vscale x 8 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v8i32.p0v8i32(<vscale x 8 x i32> %val, <vscale x 8 x i32>* %a, i32 4, <vscale x 8 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v8i32.p0v8i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, i32, <vscale x 8 x i1>)
+
+define void @masked_store_nxv8i64(<vscale x 8 x i64> %val, <vscale x 8 x i64>* %a, <vscale x 8 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vse64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v8i64.p0v8i64(<vscale x 8 x i64> %val, <vscale x 8 x i64>* %a, i32 8, <vscale x 8 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v8i64.p0v8i64(<vscale x 8 x i64>, <vscale x 8 x i64>*, i32, <vscale x 8 x i1>)
+
+define void @masked_store_nxv16i8(<vscale x 16 x i8> %val, <vscale x 16 x i8>* %a, <vscale x 16 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vse8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v16i8.p0v16i8(<vscale x 16 x i8> %val, <vscale x 16 x i8>* %a, i32 1, <vscale x 16 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v16i8.p0v16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32, <vscale x 16 x i1>)
+
+define void @masked_store_nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16>* %a, <vscale x 16 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v16i16.p0v16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16>* %a, i32 2, <vscale x 16 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v16i16.p0v16i16(<vscale x 16 x i16>, <vscale x 16 x i16>*, i32, <vscale x 16 x i1>)
+
+define void @masked_store_nxv16i32(<vscale x 16 x i32> %val, <vscale x 16 x i32>* %a, <vscale x 16 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v16i32.p0v16i32(<vscale x 16 x i32> %val, <vscale x 16 x i32>* %a, i32 4, <vscale x 16 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v16i32.p0v16i32(<vscale x 16 x i32>, <vscale x 16 x i32>*, i32, <vscale x 16 x i1>)
+
+define void @masked_store_nxv32i8(<vscale x 32 x i8> %val, <vscale x 32 x i8>* %a, <vscale x 32 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vse8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v32i8.p0v32i8(<vscale x 32 x i8> %val, <vscale x 32 x i8>* %a, i32 1, <vscale x 32 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v32i8.p0v32i8(<vscale x 32 x i8>, <vscale x 32 x i8>*, i32, <vscale x 32 x i1>)
+
+define void @masked_store_nxv32i16(<vscale x 32 x i16> %val, <vscale x 32 x i16>* %a, <vscale x 32 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vse16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v32i16.p0v32i16(<vscale x 32 x i16> %val, <vscale x 32 x i16>* %a, i32 2, <vscale x 32 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v32i16.p0v32i16(<vscale x 32 x i16>, <vscale x 32 x i16>*, i32, <vscale x 32 x i1>)
+
+define void @masked_store_nxv64i8(<vscale x 64 x i8> %val, <vscale x 64 x i8>* %a, <vscale x 64 x i1> %mask) nounwind {
+; CHECK-LABEL: masked_store_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vse8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.masked.store.v64i8.p0v64i8(<vscale x 64 x i8> %val, <vscale x 64 x i8>* %a, i32 4, <vscale x 64 x i1> %mask)
+  ret void
+}
+declare void @llvm.masked.store.v64i8.p0v64i8(<vscale x 64 x i8>, <vscale x 64 x i8>*, i32, <vscale x 64 x i1>)


        

