[llvm] c0873fa - [RISCV] Add trunc-sat-clip tests for i32->i8, i64->i8, and i64->i16. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed May 29 16:52:35 PDT 2024


Author: Craig Topper
Date: 2024-05-29T16:50:21-07:00
New Revision: c0873fa20eb5dcba303a003bdd5192d341f89eaa

URL: https://github.com/llvm/llvm-project/commit/c0873fa20eb5dcba303a003bdd5192d341f89eaa
DIFF: https://github.com/llvm/llvm-project/commit/c0873fa20eb5dcba303a003bdd5192d341f89eaa.diff

LOG: [RISCV] Add trunc-sat-clip tests for i32->i8, i64->i8, and i64->i16. NFC

These can be implemented with multiple vnclips.
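
For illustration only, a signed i64->i8 saturating truncate could in principle
lower to a chain of narrowing clips instead of vmax/vmin plus vnsrl. The
register numbers and vsetvli configuration below are assumptions for a
scalable <vscale x 4 x i64> input in v8; this is a rough sketch of that idea,
not the codegen checked in the tests below:

    vsetvli a0, zero, e32, m2, ta, ma
    vnclip.wi v12, v8, 0      # i64 -> i32, shift by 0, signed saturation
    vsetvli zero, zero, e16, m1, ta, ma
    vnclip.wi v8, v12, 0      # i32 -> i16, shift by 0, signed saturation
    vsetvli zero, zero, e8, mf2, ta, ma
    vnclip.wi v8, v8, 0       # i16 -> i8, shift by 0, signed saturation

The unsigned cases would use vnclipu instead, as the FIXME comments in the
tests note.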

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll
    llvm/test/CodeGen/RISCV/rvv/trunc-sat-clip-sdnode.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll
index 414b23ffb582a..9f82eddf432da 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-sat-clip.ll
@@ -410,3 +410,377 @@ define void @trunc_sat_u32u64_minmax(ptr %x, ptr %y) {
   store <4 x i32> %4, ptr %y, align 8
   ret void
 }
+
+define void @trunc_sat_i8i32_maxmin(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_i8i32_maxmin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    li a0, -128
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    li a0, 127
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i32>, ptr %x, align 16
+  %2 = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %1, <4 x i32> <i32 -128, i32 -128, i32 -128, i32 -128>)
+  %3 = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %2, <4 x i32> <i32 127, i32 127, i32 127, i32 127>)
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  store <4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+define void @trunc_sat_i8i32_minmax(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_i8i32_minmax:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    li a0, 127
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    li a0, -128
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i32>, ptr %x, align 16
+  %2 = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %1, <4 x i32> <i32 127, i32 127, i32 127, i32 127>)
+  %3 = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %2, <4 x i32> <i32 -128, i32 -128, i32 -128, i32 -128>)
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  store <4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+define void @trunc_sat_u8u32_min(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u8u32_min:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    li a0, 255
+; CHECK-NEXT:    vminu.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i32>, ptr %x, align 16
+  %2 = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %1, <4 x i32> <i32 255, i32 255, i32 255, i32 255>)
+  %3 = trunc <4 x i32> %2 to <4 x i8>
+  store <4 x i8> %3, ptr %y, align 8
+  ret void
+}
+
+; FIXME: This can be a signed vmax followed by vnclipu.
+define void @trunc_sat_u8u32_maxmin(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u8u32_maxmin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vmax.vx v8, v8, zero
+; CHECK-NEXT:    li a0, 255
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i32>, ptr %x, align 16
+  %2 = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %1, <4 x i32> zeroinitializer)
+  %3 = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %2, <4 x i32> <i32 255, i32 255, i32 255, i32 255>)
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  store <4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+; FIXME: This can be a signed vmax followed by vnclipu.
+define void @trunc_sat_u8u32_minmax(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u8u32_minmax:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    li a0, 255
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vmax.vx v8, v8, zero
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i32>, ptr %x, align 16
+  %2 = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %1, <4 x i32> <i32 255, i32 255, i32 255, i32 255>)
+  %3 = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %2, <4 x i32> zeroinitializer)
+  %4 = trunc <4 x i32> %3 to <4 x i8>
+  store <4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+define void @trunc_sat_i8i64_maxmin(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_i8i64_maxmin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    li a0, -128
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    li a0, 127
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i64>, ptr %x, align 16
+  %2 = tail call <4 x i64> @llvm.smax.v4i64(<4 x i64> %1, <4 x i64> <i64 -128, i64 -128, i64 -128, i64 -128>)
+  %3 = tail call <4 x i64> @llvm.smin.v4i64(<4 x i64> %2, <4 x i64> <i64 127, i64 127, i64 127, i64 127>)
+  %4 = trunc <4 x i64> %3 to <4 x i8>
+  store <4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+define void @trunc_sat_i8i64_minmax(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_i8i64_minmax:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    li a0, 127
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    li a0, -128
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i64>, ptr %x, align 16
+  %2 = tail call <4 x i64> @llvm.smin.v4i64(<4 x i64> %1, <4 x i64> <i64 127, i64 127, i64 127, i64 127>)
+  %3 = tail call <4 x i64> @llvm.smax.v4i64(<4 x i64> %2, <4 x i64> <i64 -128, i64 -128, i64 -128, i64 -128>)
+  %4 = trunc <4 x i64> %3 to <4 x i8>
+  store <4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+define void @trunc_sat_u8u64_min(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u8u64_min:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    li a0, 255
+; CHECK-NEXT:    vminu.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i64>, ptr %x, align 16
+  %2 = tail call <4 x i64> @llvm.umin.v4i64(<4 x i64> %1, <4 x i64> <i64 255, i64 255, i64 255, i64 255>)
+  %3 = trunc <4 x i64> %2 to <4 x i8>
+  store <4 x i8> %3, ptr %y, align 8
+  ret void
+}
+
+; FIXME: This can be a signed vmax followed by vnclipu.
+define void @trunc_sat_u8u64_maxmin(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u8u64_maxmin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmax.vx v8, v8, zero
+; CHECK-NEXT:    li a0, 255
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i64>, ptr %x, align 16
+  %2 = tail call <4 x i64> @llvm.smax.v4i64(<4 x i64> %1, <4 x i64> zeroinitializer)
+  %3 = tail call <4 x i64> @llvm.smin.v4i64(<4 x i64> %2, <4 x i64> <i64 255, i64 255, i64 255, i64 255>)
+  %4 = trunc <4 x i64> %3 to <4 x i8>
+  store <4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+; FIXME: This can be a signed vmax followed by vnclipu.
+define void @trunc_sat_u8u64_minmax(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u8u64_minmax:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    li a0, 255
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vmax.vx v8, v8, zero
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i64>, ptr %x, align 16
+  %2 = tail call <4 x i64> @llvm.smin.v4i64(<4 x i64> %1, <4 x i64> <i64 255, i64 255, i64 255, i64 255>)
+  %3 = tail call <4 x i64> @llvm.smax.v4i64(<4 x i64> %2, <4 x i64> zeroinitializer)
+  %4 = trunc <4 x i64> %3 to <4 x i8>
+  store <4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+define void @trunc_sat_i16i64_maxmin(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_i16i64_maxmin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    lui a0, 1048568
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    addiw a0, a0, -1
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i64>, ptr %x, align 32
+  %2 = tail call <4 x i64> @llvm.smax.v4i64(<4 x i64> %1, <4 x i64> <i64 -32768, i64 -32768, i64 -32768, i64 -32768>)
+  %3 = tail call <4 x i64> @llvm.smin.v4i64(<4 x i64> %2, <4 x i64> <i64 32767, i64 32767, i64 32767, i64 32767>)
+  %4 = trunc <4 x i64> %3 to <4 x i16>
+  store <4 x i16> %4, ptr %y, align 16
+  ret void
+}
+
+define void @trunc_sat_i16i64_minmax(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_i16i64_minmax:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    addiw a0, a0, -1
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    lui a0, 1048568
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i64>, ptr %x, align 32
+  %2 = tail call <4 x i64> @llvm.smin.v4i64(<4 x i64> %1, <4 x i64> <i64 32767, i64 32767, i64 32767, i64 32767>)
+  %3 = tail call <4 x i64> @llvm.smax.v4i64(<4 x i64> %2, <4 x i64> <i64 -32768, i64 -32768, i64 -32768, i64 -32768>)
+  %4 = trunc <4 x i64> %3 to <4 x i16>
+  store <4 x i16> %4, ptr %y, align 16
+  ret void
+}
+
+define void @trunc_sat_u16u64_notopt(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u16u64_notopt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    addiw a0, a0, -1
+; CHECK-NEXT:    vminu.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i64>, ptr %x, align 32
+  %2 = tail call <4 x i64> @llvm.umin.v4i64(<4 x i64> %1, <4 x i64> <i64 32767, i64 32767, i64 32767, i64 32767>)
+  %3 = trunc <4 x i64> %2 to <4 x i16>
+  store <4 x i16> %3, ptr %y, align 16
+  ret void
+}
+
+define void @trunc_sat_u16u64_min(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u16u64_min:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    lui a0, 16
+; CHECK-NEXT:    addiw a0, a0, -1
+; CHECK-NEXT:    vminu.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i64>, ptr %x, align 32
+  %2 = tail call <4 x i64> @llvm.umin.v4i64(<4 x i64> %1, <4 x i64> <i64 65535, i64 65535, i64 65535, i64 65535>)
+  %3 = trunc <4 x i64> %2 to <4 x i16>
+  store <4 x i16> %3, ptr %y, align 16
+  ret void
+}
+
+; FIXME: This can be a signed vmax followed by vnclipu.
+define void @trunc_sat_u16u64_maxmin(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u16u64_maxmin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    lui a0, 16
+; CHECK-NEXT:    addiw a0, a0, -1
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i64>, ptr %x, align 16
+  %2 = tail call <4 x i64> @llvm.smax.v4i64(<4 x i64> %1, <4 x i64> <i64 1, i64 1, i64 1, i64 1>)
+  %3 = tail call <4 x i64> @llvm.smin.v4i64(<4 x i64> %2, <4 x i64> <i64 65535, i64 65535, i64 65535, i64 65535>)
+  %4 = trunc <4 x i64> %3 to <4 x i16>
+  store <4 x i16> %4, ptr %y, align 8
+  ret void
+}
+
+; FIXME: This can be a signed vmax followed by vnclipu.
+define void @trunc_sat_u16u64_minmax(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u16u64_minmax:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    lui a0, 16
+; CHECK-NEXT:    addiw a0, a0, -1
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    li a0, 50
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vse16.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <4 x i64>, ptr %x, align 16
+  %2 = tail call <4 x i64> @llvm.smin.v4i64(<4 x i64> %1, <4 x i64> <i64 65535, i64 65535, i64 65535, i64 65535>)
+  %3 = tail call <4 x i64> @llvm.smax.v4i64(<4 x i64> %2, <4 x i64> <i64 50, i64 50, i64 50, i64 50>)
+  %4 = trunc <4 x i64> %3 to <4 x i16>
+  store <4 x i16> %4, ptr %y, align 8
+  ret void
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/trunc-sat-clip-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/trunc-sat-clip-sdnode.ll
index fcb49c2187191..78e8f0fbbbdd7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/trunc-sat-clip-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/trunc-sat-clip-sdnode.ll
@@ -410,3 +410,356 @@ define void @trunc_sat_u32u64_minmax(ptr %x, ptr %y) {
   store <vscale x 4 x i32> %4, ptr %y, align 8
   ret void
 }
+
+define void @trunc_sat_i8i32_maxmin(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_i8i32_maxmin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl2re32.v v8, (a0)
+; CHECK-NEXT:    li a0, -128
+; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    li a0, 127
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i32>, ptr %x, align 16
+  %2 = tail call <vscale x 4 x i32> @llvm.smax.v4i32(<vscale x 4 x i32> %1, <vscale x 4 x i32> splat (i32 -128))
+  %3 = tail call <vscale x 4 x i32> @llvm.smin.v4i32(<vscale x 4 x i32> %2, <vscale x 4 x i32> splat (i32 127))
+  %4 = trunc <vscale x 4 x i32> %3 to <vscale x 4 x i8>
+  store <vscale x 4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+define void @trunc_sat_i8i32_minmax(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_i8i32_minmax:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl2re32.v v8, (a0)
+; CHECK-NEXT:    li a0, 127
+; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    li a0, -128
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i32>, ptr %x, align 16
+  %2 = tail call <vscale x 4 x i32> @llvm.smin.v4i32(<vscale x 4 x i32> %1, <vscale x 4 x i32> splat (i32 127))
+  %3 = tail call <vscale x 4 x i32> @llvm.smax.v4i32(<vscale x 4 x i32> %2, <vscale x 4 x i32> splat (i32 -128))
+  %4 = trunc <vscale x 4 x i32> %3 to <vscale x 4 x i8>
+  store <vscale x 4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+define void @trunc_sat_u8u32_min(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u8u32_min:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl2re32.v v8, (a0)
+; CHECK-NEXT:    li a0, 255
+; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vminu.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i32>, ptr %x, align 16
+  %2 = tail call <vscale x 4 x i32> @llvm.umin.v4i32(<vscale x 4 x i32> %1, <vscale x 4 x i32> splat (i32 255))
+  %3 = trunc <vscale x 4 x i32> %2 to <vscale x 4 x i8>
+  store <vscale x 4 x i8> %3, ptr %y, align 8
+  ret void
+}
+
+; FIXME: This can be a signed vmax followed by vnclipu.
+define void @trunc_sat_u8u32_maxmin(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u8u32_maxmin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl2re32.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmax.vx v8, v8, zero
+; CHECK-NEXT:    li a0, 255
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i32>, ptr %x, align 16
+  %2 = tail call <vscale x 4 x i32> @llvm.smax.v4i32(<vscale x 4 x i32> %1, <vscale x 4 x i32> splat (i32 0))
+  %3 = tail call <vscale x 4 x i32> @llvm.smin.v4i32(<vscale x 4 x i32> %2, <vscale x 4 x i32> splat (i32 255))
+  %4 = trunc <vscale x 4 x i32> %3 to <vscale x 4 x i8>
+  store <vscale x 4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+; FIXME: This can be a signed vmax followed by vnclipu.
+define void @trunc_sat_u8u32_minmax(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u8u32_minmax:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl2re32.v v8, (a0)
+; CHECK-NEXT:    li a0, 255
+; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vmax.vx v8, v8, zero
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v10, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i32>, ptr %x, align 16
+  %2 = tail call <vscale x 4 x i32> @llvm.smin.v4i32(<vscale x 4 x i32> %1, <vscale x 4 x i32> splat (i32 255))
+  %3 = tail call <vscale x 4 x i32> @llvm.smax.v4i32(<vscale x 4 x i32> %2, <vscale x 4 x i32> splat (i32 0))
+  %4 = trunc <vscale x 4 x i32> %3 to <vscale x 4 x i8>
+  store <vscale x 4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+define void @trunc_sat_i8i64_maxmin(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_i8i64_maxmin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re64.v v8, (a0)
+; CHECK-NEXT:    li a0, -128
+; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    li a0, 127
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i64>, ptr %x, align 16
+  %2 = tail call <vscale x 4 x i64> @llvm.smax.v4i64(<vscale x 4 x i64> %1, <vscale x 4 x i64> splat (i64 -128))
+  %3 = tail call <vscale x 4 x i64> @llvm.smin.v4i64(<vscale x 4 x i64> %2, <vscale x 4 x i64> splat (i64 127))
+  %4 = trunc <vscale x 4 x i64> %3 to <vscale x 4 x i8>
+  store <vscale x 4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+define void @trunc_sat_i8i64_minmax(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_i8i64_minmax:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re64.v v8, (a0)
+; CHECK-NEXT:    li a0, 127
+; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    li a0, -128
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i64>, ptr %x, align 16
+  %2 = tail call <vscale x 4 x i64> @llvm.smin.v4i64(<vscale x 4 x i64> %1, <vscale x 4 x i64> splat (i64 127))
+  %3 = tail call <vscale x 4 x i64> @llvm.smax.v4i64(<vscale x 4 x i64> %2, <vscale x 4 x i64> splat (i64 -128))
+  %4 = trunc <vscale x 4 x i64> %3 to <vscale x 4 x i8>
+  store <vscale x 4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+define void @trunc_sat_u8u64_min(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u8u64_min:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re64.v v8, (a0)
+; CHECK-NEXT:    li a0, 255
+; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vminu.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i64>, ptr %x, align 16
+  %2 = tail call <vscale x 4 x i64> @llvm.umin.v4i64(<vscale x 4 x i64> %1, <vscale x 4 x i64> splat (i64 255))
+  %3 = trunc <vscale x 4 x i64> %2 to <vscale x 4 x i8>
+  store <vscale x 4 x i8> %3, ptr %y, align 8
+  ret void
+}
+
+; FIXME: This can be a signed vmax followed by vnclipu.
+define void @trunc_sat_u8u64_maxmin(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u8u64_maxmin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re64.v v8, (a0)
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vmax.vx v8, v8, zero
+; CHECK-NEXT:    li a0, 255
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i64>, ptr %x, align 16
+  %2 = tail call <vscale x 4 x i64> @llvm.smax.v4i64(<vscale x 4 x i64> %1, <vscale x 4 x i64> splat (i64 0))
+  %3 = tail call <vscale x 4 x i64> @llvm.smin.v4i64(<vscale x 4 x i64> %2, <vscale x 4 x i64> splat (i64 255))
+  %4 = trunc <vscale x 4 x i64> %3 to <vscale x 4 x i8>
+  store <vscale x 4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+; FIXME: This can be a signed vmax followed by vnclipu.
+define void @trunc_sat_u8u64_minmax(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u8u64_minmax:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re64.v v8, (a0)
+; CHECK-NEXT:    li a0, 255
+; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vmax.vx v8, v8, zero
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i64>, ptr %x, align 16
+  %2 = tail call <vscale x 4 x i64> @llvm.smin.v4i64(<vscale x 4 x i64> %1, <vscale x 4 x i64> splat (i64 255))
+  %3 = tail call <vscale x 4 x i64> @llvm.smax.v4i64(<vscale x 4 x i64> %2, <vscale x 4 x i64> splat (i64 0))
+  %4 = trunc <vscale x 4 x i64> %3 to <vscale x 4 x i8>
+  store <vscale x 4 x i8> %4, ptr %y, align 8
+  ret void
+}
+
+define void @trunc_sat_i16i64_maxmin(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_i16i64_maxmin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re64.v v8, (a0)
+; CHECK-NEXT:    lui a0, 1048568
+; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    addiw a0, a0, -1
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vs1r.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i64>, ptr %x, align 32
+  %2 = tail call <vscale x 4 x i64> @llvm.smax.v4i64(<vscale x 4 x i64> %1, <vscale x 4 x i64> splat (i64 -32768))
+  %3 = tail call <vscale x 4 x i64> @llvm.smin.v4i64(<vscale x 4 x i64> %2, <vscale x 4 x i64> splat (i64 32767))
+  %4 = trunc <vscale x 4 x i64> %3 to <vscale x 4 x i16>
+  store <vscale x 4 x i16> %4, ptr %y, align 16
+  ret void
+}
+
+define void @trunc_sat_i16i64_minmax(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_i16i64_minmax:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re64.v v8, (a0)
+; CHECK-NEXT:    lui a0, 8
+; CHECK-NEXT:    addiw a0, a0, -1
+; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    lui a0, 1048568
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vs1r.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i64>, ptr %x, align 32
+  %2 = tail call <vscale x 4 x i64> @llvm.smin.v4i64(<vscale x 4 x i64> %1, <vscale x 4 x i64> splat (i64 32767))
+  %3 = tail call <vscale x 4 x i64> @llvm.smax.v4i64(<vscale x 4 x i64> %2, <vscale x 4 x i64> splat (i64 -32768))
+  %4 = trunc <vscale x 4 x i64> %3 to <vscale x 4 x i16>
+  store <vscale x 4 x i16> %4, ptr %y, align 16
+  ret void
+}
+
+define void @trunc_sat_u16u64_min(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u16u64_min:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re64.v v8, (a0)
+; CHECK-NEXT:    lui a0, 16
+; CHECK-NEXT:    addiw a0, a0, -1
+; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vminu.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vs1r.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i64>, ptr %x, align 32
+  %2 = tail call <vscale x 4 x i64> @llvm.umin.v4i64(<vscale x 4 x i64> %1, <vscale x 4 x i64> splat (i64 65535))
+  %3 = trunc <vscale x 4 x i64> %2 to <vscale x 4 x i16>
+  store <vscale x 4 x i16> %3, ptr %y, align 16
+  ret void
+}
+
+; FIXME: This can be a signed vmax followed by vnclipu.
+define void @trunc_sat_u16u64_maxmin(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u16u64_maxmin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re64.v v8, (a0)
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    lui a0, 16
+; CHECK-NEXT:    addiw a0, a0, -1
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vs1r.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i64>, ptr %x, align 16
+  %2 = tail call <vscale x 4 x i64> @llvm.smax.v4i64(<vscale x 4 x i64> %1, <vscale x 4 x i64> splat (i64 1))
+  %3 = tail call <vscale x 4 x i64> @llvm.smin.v4i64(<vscale x 4 x i64> %2, <vscale x 4 x i64> splat (i64 65535))
+  %4 = trunc <vscale x 4 x i64> %3 to <vscale x 4 x i16>
+  store <vscale x 4 x i16> %4, ptr %y, align 8
+  ret void
+}
+
+; FIXME: This can be a signed vmax followed by vnclipu.
+define void @trunc_sat_u16u64_minmax(ptr %x, ptr %y) {
+; CHECK-LABEL: trunc_sat_u16u64_minmax:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl4re64.v v8, (a0)
+; CHECK-NEXT:    lui a0, 16
+; CHECK-NEXT:    addiw a0, a0, -1
+; CHECK-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vmin.vx v8, v8, a0
+; CHECK-NEXT:    li a0, 50
+; CHECK-NEXT:    vmax.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v12, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vs1r.v v8, (a1)
+; CHECK-NEXT:    ret
+  %1 = load <vscale x 4 x i64>, ptr %x, align 16
+  %2 = tail call <vscale x 4 x i64> @llvm.smin.v4i64(<vscale x 4 x i64> %1, <vscale x 4 x i64> splat (i64 65535))
+  %3 = tail call <vscale x 4 x i64> @llvm.smax.v4i64(<vscale x 4 x i64> %2, <vscale x 4 x i64> splat (i64 50))
+  %4 = trunc <vscale x 4 x i64> %3 to <vscale x 4 x i16>
+  store <vscale x 4 x i16> %4, ptr %y, align 8
+  ret void
+}

More information about the llvm-commits mailing list