[llvm] b089e40 - [RISCV] Don't allow i64 vector div by constant to use mulh with Zve64x

via llvm-commits <llvm-commits at lists.llvm.org>
Tue Jan 25 09:55:10 PST 2022


Author: eopXD
Date: 2022-01-25T09:55:05-08:00
New Revision: b089e4072a012dc0c8233cada37326f686ca2604

URL: https://github.com/llvm/llvm-project/commit/b089e4072a012dc0c8233cada37326f686ca2604
DIFF: https://github.com/llvm/llvm-project/commit/b089e4072a012dc0c8233cada37326f686ca2604.diff

LOG: [RISCV] Don't allow i64 vector div by constant to use mulh with Zve64x

EEW=64 for mulh and its variants requires the V extension; it is not provided by Zve64*.
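
To reproduce the difference, the IR below (lifted from the updated test cases;
the function and file names here are only illustrative) can be run through llc
with the two attribute sets used in the RUN lines. With -mattr=+v the i64
divide-by-constant is strength-reduced through vmulh.vx plus shifts, while
with -mattr=+zve64x it now falls back to vdiv.vx instead of emitting an
EEW=64 vmulh:

    ; repro.ll
    define <vscale x 1 x i64> @sdiv_by_neg7(<vscale x 1 x i64> %va) {
      %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
      %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
      %vc = sdiv <vscale x 1 x i64> %va, %splat
      ret <vscale x 1 x i64> %vc
    }

    ; llc -mtriple=riscv64 -mattr=+v      -verify-machineinstrs < repro.ll   # vmulh.vx + vsrl/vsra/vadd
    ; llc -mtriple=riscv64 -mattr=+zve64x -verify-machineinstrs < repro.ll   # vdiv.vx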

Authored by: Craig Topper <craig.topper at sifive.com> @craig.topper

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D117947

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVSubtarget.h
    llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9b427703764e..483d0abc8ad7 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -614,6 +614,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
 
+      // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
+      if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV()) {
+        setOperationAction(ISD::MULHU, VT, Expand);
+        setOperationAction(ISD::MULHS, VT, Expand);
+      }
+
       setOperationAction(ISD::SMIN, VT, Legal);
       setOperationAction(ISD::SMAX, VT, Legal);
       setOperationAction(ISD::UMIN, VT, Legal);
@@ -910,8 +916,11 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         setOperationAction(ISD::UMAX, VT, Custom);
         setOperationAction(ISD::ABS,  VT, Custom);
 
-        setOperationAction(ISD::MULHS, VT, Custom);
-        setOperationAction(ISD::MULHU, VT, Custom);
+        // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
+        if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV()) {
+          setOperationAction(ISD::MULHS, VT, Custom);
+          setOperationAction(ISD::MULHU, VT, Custom);
+        }
 
         setOperationAction(ISD::SADDSAT, VT, Custom);
         setOperationAction(ISD::UADDSAT, VT, Custom);

diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 8f32e88d57c0..044dda0a1ccc 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -156,6 +156,7 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
   bool hasStdExtF() const { return HasStdExtF; }
   bool hasStdExtD() const { return HasStdExtD; }
   bool hasStdExtC() const { return HasStdExtC; }
+  bool hasStdExtV() const { return HasStdExtV; }
   bool hasStdExtZba() const { return HasStdExtZba; }
   bool hasStdExtZbb() const { return HasStdExtZbb; }
   bool hasStdExtZbc() const { return HasStdExtZbc; }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll
index 16fe495b981c..fc089d690863 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-V
+; RUN: llc -mtriple=riscv32 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,ZVE64X
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-V
+; RUN: llc -mtriple=riscv64 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,ZVE64X
 
 define <vscale x 1 x i8> @vdiv_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
 ; CHECK-LABEL: vdiv_vv_nxv1i8:
@@ -895,38 +897,45 @@ define <vscale x 1 x i64> @vdiv_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 }
 
 define <vscale x 1 x i64> @vdiv_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
-; RV32-LABEL: vdiv_vi_nxv1i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 748983
-; RV32-NEXT:    addi a0, a0, -586
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    lui a0, 898779
-; RV32-NEXT:    addi a0, a0, 1755
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vmulh.vv v8, v8, v9
-; RV32-NEXT:    li a0, 63
-; RV32-NEXT:    vsrl.vx v9, v8, a0
-; RV32-NEXT:    vsra.vi v8, v8, 1
-; RV32-NEXT:    vadd.vv v8, v8, v9
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vdiv_vi_nxv1i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 748983
+; RV32-V-NEXT:    addi a0, a0, -586
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    lui a0, 898779
+; RV32-V-NEXT:    addi a0, a0, 1755
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v9, (a0), zero
+; RV32-V-NEXT:    vmulh.vv v8, v8, v9
+; RV32-V-NEXT:    li a0, 63
+; RV32-V-NEXT:    vsrl.vx v9, v8, a0
+; RV32-V-NEXT:    vsra.vi v8, v8, 1
+; RV32-V-NEXT:    vadd.vv v8, v8, v9
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vdiv_vi_nxv1i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a0, %hi(.LCPI58_0)
-; RV64-NEXT:    ld a0, %lo(.LCPI58_0)(a0)
-; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
-; RV64-NEXT:    vmulh.vx v8, v8, a0
-; RV64-NEXT:    li a0, 63
-; RV64-NEXT:    vsrl.vx v9, v8, a0
-; RV64-NEXT:    vsra.vi v8, v8, 1
-; RV64-NEXT:    vadd.vv v8, v8, v9
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vdiv_vi_nxv1i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; ZVE64X-NEXT:    vdiv.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vdiv_vi_nxv1i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    lui a0, %hi(.LCPI58_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI58_0)(a0)
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; RV64-V-NEXT:    vmulh.vx v8, v8, a0
+; RV64-V-NEXT:    li a0, 63
+; RV64-V-NEXT:    vsrl.vx v9, v8, a0
+; RV64-V-NEXT:    vsra.vi v8, v8, 1
+; RV64-V-NEXT:    vadd.vv v8, v8, v9
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
   %vc = sdiv <vscale x 1 x i64> %va, %splat
@@ -969,38 +978,45 @@ define <vscale x 2 x i64> @vdiv_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 }
 
 define <vscale x 2 x i64> @vdiv_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
-; RV32-LABEL: vdiv_vi_nxv2i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 748983
-; RV32-NEXT:    addi a0, a0, -586
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    lui a0, 898779
-; RV32-NEXT:    addi a0, a0, 1755
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vmulh.vv v8, v8, v10
-; RV32-NEXT:    li a0, 63
-; RV32-NEXT:    vsrl.vx v10, v8, a0
-; RV32-NEXT:    vsra.vi v8, v8, 1
-; RV32-NEXT:    vadd.vv v8, v8, v10
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vdiv_vi_nxv2i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 748983
+; RV32-V-NEXT:    addi a0, a0, -586
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    lui a0, 898779
+; RV32-V-NEXT:    addi a0, a0, 1755
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v10, (a0), zero
+; RV32-V-NEXT:    vmulh.vv v8, v8, v10
+; RV32-V-NEXT:    li a0, 63
+; RV32-V-NEXT:    vsrl.vx v10, v8, a0
+; RV32-V-NEXT:    vsra.vi v8, v8, 1
+; RV32-V-NEXT:    vadd.vv v8, v8, v10
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vdiv_vi_nxv2i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a0, %hi(.LCPI61_0)
-; RV64-NEXT:    ld a0, %lo(.LCPI61_0)(a0)
-; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
-; RV64-NEXT:    vmulh.vx v8, v8, a0
-; RV64-NEXT:    li a0, 63
-; RV64-NEXT:    vsrl.vx v10, v8, a0
-; RV64-NEXT:    vsra.vi v8, v8, 1
-; RV64-NEXT:    vadd.vv v8, v8, v10
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vdiv_vi_nxv2i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
+; ZVE64X-NEXT:    vdiv.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vdiv_vi_nxv2i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    lui a0, %hi(.LCPI61_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI61_0)(a0)
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
+; RV64-V-NEXT:    vmulh.vx v8, v8, a0
+; RV64-V-NEXT:    li a0, 63
+; RV64-V-NEXT:    vsrl.vx v10, v8, a0
+; RV64-V-NEXT:    vsra.vi v8, v8, 1
+; RV64-V-NEXT:    vadd.vv v8, v8, v10
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %vc = sdiv <vscale x 2 x i64> %va, %splat
@@ -1043,38 +1059,45 @@ define <vscale x 4 x i64> @vdiv_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 }
 
 define <vscale x 4 x i64> @vdiv_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
-; RV32-LABEL: vdiv_vi_nxv4i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 748983
-; RV32-NEXT:    addi a0, a0, -586
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    lui a0, 898779
-; RV32-NEXT:    addi a0, a0, 1755
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vmulh.vv v8, v8, v12
-; RV32-NEXT:    li a0, 63
-; RV32-NEXT:    vsrl.vx v12, v8, a0
-; RV32-NEXT:    vsra.vi v8, v8, 1
-; RV32-NEXT:    vadd.vv v8, v8, v12
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vdiv_vi_nxv4i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 748983
+; RV32-V-NEXT:    addi a0, a0, -586
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    lui a0, 898779
+; RV32-V-NEXT:    addi a0, a0, 1755
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v12, (a0), zero
+; RV32-V-NEXT:    vmulh.vv v8, v8, v12
+; RV32-V-NEXT:    li a0, 63
+; RV32-V-NEXT:    vsrl.vx v12, v8, a0
+; RV32-V-NEXT:    vsra.vi v8, v8, 1
+; RV32-V-NEXT:    vadd.vv v8, v8, v12
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vdiv_vi_nxv4i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a0, %hi(.LCPI64_0)
-; RV64-NEXT:    ld a0, %lo(.LCPI64_0)(a0)
-; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
-; RV64-NEXT:    vmulh.vx v8, v8, a0
-; RV64-NEXT:    li a0, 63
-; RV64-NEXT:    vsrl.vx v12, v8, a0
-; RV64-NEXT:    vsra.vi v8, v8, 1
-; RV64-NEXT:    vadd.vv v8, v8, v12
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vdiv_vi_nxv4i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
+; ZVE64X-NEXT:    vdiv.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vdiv_vi_nxv4i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    lui a0, %hi(.LCPI64_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI64_0)(a0)
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
+; RV64-V-NEXT:    vmulh.vx v8, v8, a0
+; RV64-V-NEXT:    li a0, 63
+; RV64-V-NEXT:    vsrl.vx v12, v8, a0
+; RV64-V-NEXT:    vsra.vi v8, v8, 1
+; RV64-V-NEXT:    vadd.vv v8, v8, v12
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %vc = sdiv <vscale x 4 x i64> %va, %splat
@@ -1117,41 +1140,47 @@ define <vscale x 8 x i64> @vdiv_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 }
 
 define <vscale x 8 x i64> @vdiv_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
-; RV32-LABEL: vdiv_vi_nxv8i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 748983
-; RV32-NEXT:    addi a0, a0, -586
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    lui a0, 898779
-; RV32-NEXT:    addi a0, a0, 1755
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v16, (a0), zero
-; RV32-NEXT:    vmulh.vv v8, v8, v16
-; RV32-NEXT:    li a0, 63
-; RV32-NEXT:    vsrl.vx v16, v8, a0
-; RV32-NEXT:    vsra.vi v8, v8, 1
-; RV32-NEXT:    vadd.vv v8, v8, v16
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vdiv_vi_nxv8i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 748983
+; RV32-V-NEXT:    addi a0, a0, -586
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    lui a0, 898779
+; RV32-V-NEXT:    addi a0, a0, 1755
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v16, (a0), zero
+; RV32-V-NEXT:    vmulh.vv v8, v8, v16
+; RV32-V-NEXT:    li a0, 63
+; RV32-V-NEXT:    vsrl.vx v16, v8, a0
+; RV32-V-NEXT:    vsra.vi v8, v8, 1
+; RV32-V-NEXT:    vadd.vv v8, v8, v16
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vdiv_vi_nxv8i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a0, %hi(.LCPI67_0)
-; RV64-NEXT:    ld a0, %lo(.LCPI67_0)(a0)
-; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT:    vmulh.vx v8, v8, a0
-; RV64-NEXT:    li a0, 63
-; RV64-NEXT:    vsrl.vx v16, v8, a0
-; RV64-NEXT:    vsra.vi v8, v8, 1
-; RV64-NEXT:    vadd.vv v8, v8, v16
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vdiv_vi_nxv8i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
+; ZVE64X-NEXT:    vdiv.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vdiv_vi_nxv8i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    lui a0, %hi(.LCPI67_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI67_0)(a0)
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
+; RV64-V-NEXT:    vmulh.vx v8, v8, a0
+; RV64-V-NEXT:    li a0, 63
+; RV64-V-NEXT:    vsrl.vx v16, v8, a0
+; RV64-V-NEXT:    vsra.vi v8, v8, 1
+; RV64-V-NEXT:    vadd.vv v8, v8, v16
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
   %vc = sdiv <vscale x 8 x i64> %va, %splat
   ret <vscale x 8 x i64> %vc
 }
-

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll
index 5bf80bbc3485..2635e14c7e06 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-sdnode.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-V
+; RUN: llc -mtriple=riscv32 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,ZVE64X
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-V
+; RUN: llc -mtriple=riscv64 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,ZVE64X
 
 define <vscale x 1 x i8> @vdivu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
 ; CHECK-LABEL: vdivu_vv_nxv1i8:
@@ -820,33 +822,40 @@ define <vscale x 1 x i64> @vdivu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 }
 
 define <vscale x 1 x i64> @vdivu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
-; RV32-LABEL: vdivu_vi_nxv1i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 131072
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    li a0, 1
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vmulhu.vv v8, v8, v9
-; RV32-NEXT:    li a0, 61
-; RV32-NEXT:    vsrl.vx v8, v8, a0
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vdivu_vi_nxv1i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 131072
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    li a0, 1
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v9, (a0), zero
+; RV32-V-NEXT:    vmulhu.vv v8, v8, v9
+; RV32-V-NEXT:    li a0, 61
+; RV32-V-NEXT:    vsrl.vx v8, v8, a0
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vdivu_vi_nxv1i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 1
-; RV64-NEXT:    slli a0, a0, 61
-; RV64-NEXT:    addi a0, a0, 1
-; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
-; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    li a0, 61
-; RV64-NEXT:    vsrl.vx v8, v8, a0
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vdivu_vi_nxv1i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; ZVE64X-NEXT:    vdivu.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vdivu_vi_nxv1i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    li a0, 1
+; RV64-V-NEXT:    slli a0, a0, 61
+; RV64-V-NEXT:    addi a0, a0, 1
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; RV64-V-NEXT:    vmulhu.vx v8, v8, a0
+; RV64-V-NEXT:    li a0, 61
+; RV64-V-NEXT:    vsrl.vx v8, v8, a0
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
   %vc = udiv <vscale x 1 x i64> %va, %splat
@@ -916,33 +925,40 @@ define <vscale x 2 x i64> @vdivu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 }
 
 define <vscale x 2 x i64> @vdivu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
-; RV32-LABEL: vdivu_vi_nxv2i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 131072
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    li a0, 1
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vmulhu.vv v8, v8, v10
-; RV32-NEXT:    li a0, 61
-; RV32-NEXT:    vsrl.vx v8, v8, a0
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vdivu_vi_nxv2i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 131072
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    li a0, 1
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v10, (a0), zero
+; RV32-V-NEXT:    vmulhu.vv v8, v8, v10
+; RV32-V-NEXT:    li a0, 61
+; RV32-V-NEXT:    vsrl.vx v8, v8, a0
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vdivu_vi_nxv2i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 1
-; RV64-NEXT:    slli a0, a0, 61
-; RV64-NEXT:    addi a0, a0, 1
-; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
-; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    li a0, 61
-; RV64-NEXT:    vsrl.vx v8, v8, a0
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vdivu_vi_nxv2i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
+; ZVE64X-NEXT:    vdivu.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vdivu_vi_nxv2i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    li a0, 1
+; RV64-V-NEXT:    slli a0, a0, 61
+; RV64-V-NEXT:    addi a0, a0, 1
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
+; RV64-V-NEXT:    vmulhu.vx v8, v8, a0
+; RV64-V-NEXT:    li a0, 61
+; RV64-V-NEXT:    vsrl.vx v8, v8, a0
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %vc = udiv <vscale x 2 x i64> %va, %splat
@@ -1012,33 +1028,40 @@ define <vscale x 4 x i64> @vdivu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 }
 
 define <vscale x 4 x i64> @vdivu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
-; RV32-LABEL: vdivu_vi_nxv4i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 131072
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    li a0, 1
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vmulhu.vv v8, v8, v12
-; RV32-NEXT:    li a0, 61
-; RV32-NEXT:    vsrl.vx v8, v8, a0
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vdivu_vi_nxv4i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 131072
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    li a0, 1
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v12, (a0), zero
+; RV32-V-NEXT:    vmulhu.vv v8, v8, v12
+; RV32-V-NEXT:    li a0, 61
+; RV32-V-NEXT:    vsrl.vx v8, v8, a0
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vdivu_vi_nxv4i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 1
-; RV64-NEXT:    slli a0, a0, 61
-; RV64-NEXT:    addi a0, a0, 1
-; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
-; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    li a0, 61
-; RV64-NEXT:    vsrl.vx v8, v8, a0
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vdivu_vi_nxv4i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
+; ZVE64X-NEXT:    vdivu.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vdivu_vi_nxv4i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    li a0, 1
+; RV64-V-NEXT:    slli a0, a0, 61
+; RV64-V-NEXT:    addi a0, a0, 1
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
+; RV64-V-NEXT:    vmulhu.vx v8, v8, a0
+; RV64-V-NEXT:    li a0, 61
+; RV64-V-NEXT:    vsrl.vx v8, v8, a0
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %vc = udiv <vscale x 4 x i64> %va, %splat
@@ -1108,33 +1131,40 @@ define <vscale x 8 x i64> @vdivu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 }
 
 define <vscale x 8 x i64> @vdivu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
-; RV32-LABEL: vdivu_vi_nxv8i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 131072
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    li a0, 1
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v16, (a0), zero
-; RV32-NEXT:    vmulhu.vv v8, v8, v16
-; RV32-NEXT:    li a0, 61
-; RV32-NEXT:    vsrl.vx v8, v8, a0
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vdivu_vi_nxv8i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 131072
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    li a0, 1
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v16, (a0), zero
+; RV32-V-NEXT:    vmulhu.vv v8, v8, v16
+; RV32-V-NEXT:    li a0, 61
+; RV32-V-NEXT:    vsrl.vx v8, v8, a0
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vdivu_vi_nxv8i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 1
-; RV64-NEXT:    slli a0, a0, 61
-; RV64-NEXT:    addi a0, a0, 1
-; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT:    vmulhu.vx v8, v8, a0
-; RV64-NEXT:    li a0, 61
-; RV64-NEXT:    vsrl.vx v8, v8, a0
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vdivu_vi_nxv8i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
+; ZVE64X-NEXT:    vdivu.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vdivu_vi_nxv8i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    li a0, 1
+; RV64-V-NEXT:    slli a0, a0, 61
+; RV64-V-NEXT:    addi a0, a0, 1
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
+; RV64-V-NEXT:    vmulhu.vx v8, v8, a0
+; RV64-V-NEXT:    li a0, 61
+; RV64-V-NEXT:    vsrl.vx v8, v8, a0
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
   %vc = udiv <vscale x 8 x i64> %va, %splat

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
index f6e47b8272b4..9cd03815dc38 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-V
+; RUN: llc -mtriple=riscv32 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,ZVE64X
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-V
+; RUN: llc -mtriple=riscv64 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,ZVE64X
 
 define <vscale x 1 x i8> @vrem_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
 ; CHECK-LABEL: vrem_vv_nxv1i8:
@@ -929,42 +931,49 @@ define <vscale x 1 x i64> @vrem_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 }
 
 define <vscale x 1 x i64> @vrem_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
-; RV32-LABEL: vrem_vi_nxv1i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 748983
-; RV32-NEXT:    addi a0, a0, -586
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    lui a0, 898779
-; RV32-NEXT:    addi a0, a0, 1755
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vmulh.vv v9, v8, v9
-; RV32-NEXT:    li a0, 63
-; RV32-NEXT:    vsrl.vx v10, v9, a0
-; RV32-NEXT:    vsra.vi v9, v9, 1
-; RV32-NEXT:    vadd.vv v9, v9, v10
-; RV32-NEXT:    li a0, -7
-; RV32-NEXT:    vnmsac.vx v8, a0, v9
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vrem_vi_nxv1i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 748983
+; RV32-V-NEXT:    addi a0, a0, -586
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    lui a0, 898779
+; RV32-V-NEXT:    addi a0, a0, 1755
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v9, (a0), zero
+; RV32-V-NEXT:    vmulh.vv v9, v8, v9
+; RV32-V-NEXT:    li a0, 63
+; RV32-V-NEXT:    vsrl.vx v10, v9, a0
+; RV32-V-NEXT:    vsra.vi v9, v9, 1
+; RV32-V-NEXT:    vadd.vv v9, v9, v10
+; RV32-V-NEXT:    li a0, -7
+; RV32-V-NEXT:    vnmsac.vx v8, a0, v9
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vrem_vi_nxv1i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a0, %hi(.LCPI56_0)
-; RV64-NEXT:    ld a0, %lo(.LCPI56_0)(a0)
-; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
-; RV64-NEXT:    vmulh.vx v9, v8, a0
-; RV64-NEXT:    li a0, 63
-; RV64-NEXT:    vsrl.vx v10, v9, a0
-; RV64-NEXT:    vsra.vi v9, v9, 1
-; RV64-NEXT:    vadd.vv v9, v9, v10
-; RV64-NEXT:    li a0, -7
-; RV64-NEXT:    vnmsac.vx v8, a0, v9
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vrem_vi_nxv1i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; ZVE64X-NEXT:    vrem.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vrem_vi_nxv1i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    lui a0, %hi(.LCPI56_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI56_0)(a0)
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; RV64-V-NEXT:    vmulh.vx v9, v8, a0
+; RV64-V-NEXT:    li a0, 63
+; RV64-V-NEXT:    vsrl.vx v10, v9, a0
+; RV64-V-NEXT:    vsra.vi v9, v9, 1
+; RV64-V-NEXT:    vadd.vv v9, v9, v10
+; RV64-V-NEXT:    li a0, -7
+; RV64-V-NEXT:    vnmsac.vx v8, a0, v9
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
   %vc = srem <vscale x 1 x i64> %va, %splat
@@ -1007,42 +1016,49 @@ define <vscale x 2 x i64> @vrem_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 }
 
 define <vscale x 2 x i64> @vrem_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
-; RV32-LABEL: vrem_vi_nxv2i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 748983
-; RV32-NEXT:    addi a0, a0, -586
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    lui a0, 898779
-; RV32-NEXT:    addi a0, a0, 1755
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vmulh.vv v10, v8, v10
-; RV32-NEXT:    li a0, 63
-; RV32-NEXT:    vsrl.vx v12, v10, a0
-; RV32-NEXT:    vsra.vi v10, v10, 1
-; RV32-NEXT:    vadd.vv v10, v10, v12
-; RV32-NEXT:    li a0, -7
-; RV32-NEXT:    vnmsac.vx v8, a0, v10
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vrem_vi_nxv2i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 748983
+; RV32-V-NEXT:    addi a0, a0, -586
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    lui a0, 898779
+; RV32-V-NEXT:    addi a0, a0, 1755
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v10, (a0), zero
+; RV32-V-NEXT:    vmulh.vv v10, v8, v10
+; RV32-V-NEXT:    li a0, 63
+; RV32-V-NEXT:    vsrl.vx v12, v10, a0
+; RV32-V-NEXT:    vsra.vi v10, v10, 1
+; RV32-V-NEXT:    vadd.vv v10, v10, v12
+; RV32-V-NEXT:    li a0, -7
+; RV32-V-NEXT:    vnmsac.vx v8, a0, v10
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vrem_vi_nxv2i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a0, %hi(.LCPI59_0)
-; RV64-NEXT:    ld a0, %lo(.LCPI59_0)(a0)
-; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
-; RV64-NEXT:    vmulh.vx v10, v8, a0
-; RV64-NEXT:    li a0, 63
-; RV64-NEXT:    vsrl.vx v12, v10, a0
-; RV64-NEXT:    vsra.vi v10, v10, 1
-; RV64-NEXT:    vadd.vv v10, v10, v12
-; RV64-NEXT:    li a0, -7
-; RV64-NEXT:    vnmsac.vx v8, a0, v10
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vrem_vi_nxv2i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
+; ZVE64X-NEXT:    vrem.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vrem_vi_nxv2i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    lui a0, %hi(.LCPI59_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI59_0)(a0)
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
+; RV64-V-NEXT:    vmulh.vx v10, v8, a0
+; RV64-V-NEXT:    li a0, 63
+; RV64-V-NEXT:    vsrl.vx v12, v10, a0
+; RV64-V-NEXT:    vsra.vi v10, v10, 1
+; RV64-V-NEXT:    vadd.vv v10, v10, v12
+; RV64-V-NEXT:    li a0, -7
+; RV64-V-NEXT:    vnmsac.vx v8, a0, v10
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %vc = srem <vscale x 2 x i64> %va, %splat
@@ -1085,42 +1101,49 @@ define <vscale x 4 x i64> @vrem_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 }
 
 define <vscale x 4 x i64> @vrem_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
-; RV32-LABEL: vrem_vi_nxv4i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 748983
-; RV32-NEXT:    addi a0, a0, -586
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    lui a0, 898779
-; RV32-NEXT:    addi a0, a0, 1755
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vmulh.vv v12, v8, v12
-; RV32-NEXT:    li a0, 63
-; RV32-NEXT:    vsrl.vx v16, v12, a0
-; RV32-NEXT:    vsra.vi v12, v12, 1
-; RV32-NEXT:    vadd.vv v12, v12, v16
-; RV32-NEXT:    li a0, -7
-; RV32-NEXT:    vnmsac.vx v8, a0, v12
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vrem_vi_nxv4i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 748983
+; RV32-V-NEXT:    addi a0, a0, -586
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    lui a0, 898779
+; RV32-V-NEXT:    addi a0, a0, 1755
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v12, (a0), zero
+; RV32-V-NEXT:    vmulh.vv v12, v8, v12
+; RV32-V-NEXT:    li a0, 63
+; RV32-V-NEXT:    vsrl.vx v16, v12, a0
+; RV32-V-NEXT:    vsra.vi v12, v12, 1
+; RV32-V-NEXT:    vadd.vv v12, v12, v16
+; RV32-V-NEXT:    li a0, -7
+; RV32-V-NEXT:    vnmsac.vx v8, a0, v12
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vrem_vi_nxv4i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a0, %hi(.LCPI62_0)
-; RV64-NEXT:    ld a0, %lo(.LCPI62_0)(a0)
-; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
-; RV64-NEXT:    vmulh.vx v12, v8, a0
-; RV64-NEXT:    li a0, 63
-; RV64-NEXT:    vsrl.vx v16, v12, a0
-; RV64-NEXT:    vsra.vi v12, v12, 1
-; RV64-NEXT:    vadd.vv v12, v12, v16
-; RV64-NEXT:    li a0, -7
-; RV64-NEXT:    vnmsac.vx v8, a0, v12
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vrem_vi_nxv4i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
+; ZVE64X-NEXT:    vrem.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vrem_vi_nxv4i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    lui a0, %hi(.LCPI62_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI62_0)(a0)
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
+; RV64-V-NEXT:    vmulh.vx v12, v8, a0
+; RV64-V-NEXT:    li a0, 63
+; RV64-V-NEXT:    vsrl.vx v16, v12, a0
+; RV64-V-NEXT:    vsra.vi v12, v12, 1
+; RV64-V-NEXT:    vadd.vv v12, v12, v16
+; RV64-V-NEXT:    li a0, -7
+; RV64-V-NEXT:    vnmsac.vx v8, a0, v12
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %vc = srem <vscale x 4 x i64> %va, %splat
@@ -1163,42 +1186,49 @@ define <vscale x 8 x i64> @vrem_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 }
 
 define <vscale x 8 x i64> @vrem_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
-; RV32-LABEL: vrem_vi_nxv8i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 748983
-; RV32-NEXT:    addi a0, a0, -586
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    lui a0, 898779
-; RV32-NEXT:    addi a0, a0, 1755
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v16, (a0), zero
-; RV32-NEXT:    vmulh.vv v16, v8, v16
-; RV32-NEXT:    li a0, 63
-; RV32-NEXT:    vsrl.vx v24, v16, a0
-; RV32-NEXT:    vsra.vi v16, v16, 1
-; RV32-NEXT:    vadd.vv v16, v16, v24
-; RV32-NEXT:    li a0, -7
-; RV32-NEXT:    vnmsac.vx v8, a0, v16
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vrem_vi_nxv8i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 748983
+; RV32-V-NEXT:    addi a0, a0, -586
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    lui a0, 898779
+; RV32-V-NEXT:    addi a0, a0, 1755
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v16, (a0), zero
+; RV32-V-NEXT:    vmulh.vv v16, v8, v16
+; RV32-V-NEXT:    li a0, 63
+; RV32-V-NEXT:    vsrl.vx v24, v16, a0
+; RV32-V-NEXT:    vsra.vi v16, v16, 1
+; RV32-V-NEXT:    vadd.vv v16, v16, v24
+; RV32-V-NEXT:    li a0, -7
+; RV32-V-NEXT:    vnmsac.vx v8, a0, v16
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vrem_vi_nxv8i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    lui a0, %hi(.LCPI65_0)
-; RV64-NEXT:    ld a0, %lo(.LCPI65_0)(a0)
-; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT:    vmulh.vx v16, v8, a0
-; RV64-NEXT:    li a0, 63
-; RV64-NEXT:    vsrl.vx v24, v16, a0
-; RV64-NEXT:    vsra.vi v16, v16, 1
-; RV64-NEXT:    vadd.vv v16, v16, v24
-; RV64-NEXT:    li a0, -7
-; RV64-NEXT:    vnmsac.vx v8, a0, v16
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vrem_vi_nxv8i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
+; ZVE64X-NEXT:    vrem.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vrem_vi_nxv8i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    lui a0, %hi(.LCPI65_0)
+; RV64-V-NEXT:    ld a0, %lo(.LCPI65_0)(a0)
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
+; RV64-V-NEXT:    vmulh.vx v16, v8, a0
+; RV64-V-NEXT:    li a0, 63
+; RV64-V-NEXT:    vsrl.vx v24, v16, a0
+; RV64-V-NEXT:    vsra.vi v16, v16, 1
+; RV64-V-NEXT:    vadd.vv v16, v16, v24
+; RV64-V-NEXT:    li a0, -7
+; RV64-V-NEXT:    vnmsac.vx v8, a0, v16
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
   %vc = srem <vscale x 8 x i64> %va, %splat

diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll
index e67f25b6b5c7..c049bc6b7699 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-sdnode.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,RV32-V
+; RUN: llc -mtriple=riscv32 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32,ZVE64X
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64-V
+; RUN: llc -mtriple=riscv64 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,ZVE64X
 
 define <vscale x 1 x i8> @vremu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
 ; CHECK-LABEL: vremu_vv_nxv1i8:
@@ -854,37 +856,44 @@ define <vscale x 1 x i64> @vremu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 }
 
 define <vscale x 1 x i64> @vremu_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
-; RV32-LABEL: vremu_vi_nxv1i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 131072
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    li a0, 1
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vmulhu.vv v9, v8, v9
-; RV32-NEXT:    li a0, 61
-; RV32-NEXT:    vsrl.vx v9, v9, a0
-; RV32-NEXT:    li a0, -7
-; RV32-NEXT:    vnmsac.vx v8, a0, v9
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vremu_vi_nxv1i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 131072
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    li a0, 1
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v9, (a0), zero
+; RV32-V-NEXT:    vmulhu.vv v9, v8, v9
+; RV32-V-NEXT:    li a0, 61
+; RV32-V-NEXT:    vsrl.vx v9, v9, a0
+; RV32-V-NEXT:    li a0, -7
+; RV32-V-NEXT:    vnmsac.vx v8, a0, v9
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vremu_vi_nxv1i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 1
-; RV64-NEXT:    slli a0, a0, 61
-; RV64-NEXT:    addi a0, a0, 1
-; RV64-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
-; RV64-NEXT:    vmulhu.vx v9, v8, a0
-; RV64-NEXT:    li a0, 61
-; RV64-NEXT:    vsrl.vx v9, v9, a0
-; RV64-NEXT:    li a0, -7
-; RV64-NEXT:    vnmsac.vx v8, a0, v9
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vremu_vi_nxv1i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; ZVE64X-NEXT:    vremu.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vremu_vi_nxv1i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    li a0, 1
+; RV64-V-NEXT:    slli a0, a0, 61
+; RV64-V-NEXT:    addi a0, a0, 1
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; RV64-V-NEXT:    vmulhu.vx v9, v8, a0
+; RV64-V-NEXT:    li a0, 61
+; RV64-V-NEXT:    vsrl.vx v9, v9, a0
+; RV64-V-NEXT:    li a0, -7
+; RV64-V-NEXT:    vnmsac.vx v8, a0, v9
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
   %vc = urem <vscale x 1 x i64> %va, %splat
@@ -958,37 +967,44 @@ define <vscale x 2 x i64> @vremu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 }
 
 define <vscale x 2 x i64> @vremu_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
-; RV32-LABEL: vremu_vi_nxv2i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 131072
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    li a0, 1
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v10, (a0), zero
-; RV32-NEXT:    vmulhu.vv v10, v8, v10
-; RV32-NEXT:    li a0, 61
-; RV32-NEXT:    vsrl.vx v10, v10, a0
-; RV32-NEXT:    li a0, -7
-; RV32-NEXT:    vnmsac.vx v8, a0, v10
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vremu_vi_nxv2i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 131072
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    li a0, 1
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v10, (a0), zero
+; RV32-V-NEXT:    vmulhu.vv v10, v8, v10
+; RV32-V-NEXT:    li a0, 61
+; RV32-V-NEXT:    vsrl.vx v10, v10, a0
+; RV32-V-NEXT:    li a0, -7
+; RV32-V-NEXT:    vnmsac.vx v8, a0, v10
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vremu_vi_nxv2i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 1
-; RV64-NEXT:    slli a0, a0, 61
-; RV64-NEXT:    addi a0, a0, 1
-; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
-; RV64-NEXT:    vmulhu.vx v10, v8, a0
-; RV64-NEXT:    li a0, 61
-; RV64-NEXT:    vsrl.vx v10, v10, a0
-; RV64-NEXT:    li a0, -7
-; RV64-NEXT:    vnmsac.vx v8, a0, v10
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vremu_vi_nxv2i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
+; ZVE64X-NEXT:    vremu.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vremu_vi_nxv2i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    li a0, 1
+; RV64-V-NEXT:    slli a0, a0, 61
+; RV64-V-NEXT:    addi a0, a0, 1
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
+; RV64-V-NEXT:    vmulhu.vx v10, v8, a0
+; RV64-V-NEXT:    li a0, 61
+; RV64-V-NEXT:    vsrl.vx v10, v10, a0
+; RV64-V-NEXT:    li a0, -7
+; RV64-V-NEXT:    vnmsac.vx v8, a0, v10
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
   %vc = urem <vscale x 2 x i64> %va, %splat
@@ -1062,37 +1078,44 @@ define <vscale x 4 x i64> @vremu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 }
 
 define <vscale x 4 x i64> @vremu_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
-; RV32-LABEL: vremu_vi_nxv4i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 131072
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    li a0, 1
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v12, (a0), zero
-; RV32-NEXT:    vmulhu.vv v12, v8, v12
-; RV32-NEXT:    li a0, 61
-; RV32-NEXT:    vsrl.vx v12, v12, a0
-; RV32-NEXT:    li a0, -7
-; RV32-NEXT:    vnmsac.vx v8, a0, v12
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vremu_vi_nxv4i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 131072
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    li a0, 1
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v12, (a0), zero
+; RV32-V-NEXT:    vmulhu.vv v12, v8, v12
+; RV32-V-NEXT:    li a0, 61
+; RV32-V-NEXT:    vsrl.vx v12, v12, a0
+; RV32-V-NEXT:    li a0, -7
+; RV32-V-NEXT:    vnmsac.vx v8, a0, v12
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vremu_vi_nxv4i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 1
-; RV64-NEXT:    slli a0, a0, 61
-; RV64-NEXT:    addi a0, a0, 1
-; RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
-; RV64-NEXT:    vmulhu.vx v12, v8, a0
-; RV64-NEXT:    li a0, 61
-; RV64-NEXT:    vsrl.vx v12, v12, a0
-; RV64-NEXT:    li a0, -7
-; RV64-NEXT:    vnmsac.vx v8, a0, v12
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vremu_vi_nxv4i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
+; ZVE64X-NEXT:    vremu.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vremu_vi_nxv4i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    li a0, 1
+; RV64-V-NEXT:    slli a0, a0, 61
+; RV64-V-NEXT:    addi a0, a0, 1
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
+; RV64-V-NEXT:    vmulhu.vx v12, v8, a0
+; RV64-V-NEXT:    li a0, 61
+; RV64-V-NEXT:    vsrl.vx v12, v12, a0
+; RV64-V-NEXT:    li a0, -7
+; RV64-V-NEXT:    vnmsac.vx v8, a0, v12
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %vc = urem <vscale x 4 x i64> %va, %splat
@@ -1166,37 +1189,44 @@ define <vscale x 8 x i64> @vremu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 }
 
 define <vscale x 8 x i64> @vremu_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
-; RV32-LABEL: vremu_vi_nxv8i64_0:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    lui a0, 131072
-; RV32-NEXT:    sw a0, 12(sp)
-; RV32-NEXT:    li a0, 1
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v16, (a0), zero
-; RV32-NEXT:    vmulhu.vv v16, v8, v16
-; RV32-NEXT:    li a0, 61
-; RV32-NEXT:    vsrl.vx v16, v16, a0
-; RV32-NEXT:    li a0, -7
-; RV32-NEXT:    vnmsac.vx v8, a0, v16
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
+; RV32-V-LABEL: vremu_vi_nxv8i64_0:
+; RV32-V:       # %bb.0:
+; RV32-V-NEXT:    addi sp, sp, -16
+; RV32-V-NEXT:    .cfi_def_cfa_offset 16
+; RV32-V-NEXT:    lui a0, 131072
+; RV32-V-NEXT:    sw a0, 12(sp)
+; RV32-V-NEXT:    li a0, 1
+; RV32-V-NEXT:    sw a0, 8(sp)
+; RV32-V-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
+; RV32-V-NEXT:    addi a0, sp, 8
+; RV32-V-NEXT:    vlse64.v v16, (a0), zero
+; RV32-V-NEXT:    vmulhu.vv v16, v8, v16
+; RV32-V-NEXT:    li a0, 61
+; RV32-V-NEXT:    vsrl.vx v16, v16, a0
+; RV32-V-NEXT:    li a0, -7
+; RV32-V-NEXT:    vnmsac.vx v8, a0, v16
+; RV32-V-NEXT:    addi sp, sp, 16
+; RV32-V-NEXT:    ret
 ;
-; RV64-LABEL: vremu_vi_nxv8i64_0:
-; RV64:       # %bb.0:
-; RV64-NEXT:    li a0, 1
-; RV64-NEXT:    slli a0, a0, 61
-; RV64-NEXT:    addi a0, a0, 1
-; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT:    vmulhu.vx v16, v8, a0
-; RV64-NEXT:    li a0, 61
-; RV64-NEXT:    vsrl.vx v16, v16, a0
-; RV64-NEXT:    li a0, -7
-; RV64-NEXT:    vnmsac.vx v8, a0, v16
-; RV64-NEXT:    ret
+; ZVE64X-LABEL: vremu_vi_nxv8i64_0:
+; ZVE64X:       # %bb.0:
+; ZVE64X-NEXT:    li a0, -7
+; ZVE64X-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
+; ZVE64X-NEXT:    vremu.vx v8, v8, a0
+; ZVE64X-NEXT:    ret
+;
+; RV64-V-LABEL: vremu_vi_nxv8i64_0:
+; RV64-V:       # %bb.0:
+; RV64-V-NEXT:    li a0, 1
+; RV64-V-NEXT:    slli a0, a0, 61
+; RV64-V-NEXT:    addi a0, a0, 1
+; RV64-V-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
+; RV64-V-NEXT:    vmulhu.vx v16, v8, a0
+; RV64-V-NEXT:    li a0, 61
+; RV64-V-NEXT:    vsrl.vx v16, v16, a0
+; RV64-V-NEXT:    li a0, -7
+; RV64-V-NEXT:    vnmsac.vx v8, a0, v16
+; RV64-V-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
   %vc = urem <vscale x 8 x i64> %va, %splat


        

