[llvm] e1acbda - [AArch64][RISCV] Fix expected smulo/umulo test output
Fraser Cormack via llvm-commits
llvm-commits at lists.llvm.org
Thu Nov 18 04:06:37 PST 2021
Author: Fraser Cormack
Date: 2021-11-18T11:57:26Z
New Revision: e1acbda158b3f35d7905787f04d8c2f64cfa8c8e
URL: https://github.com/llvm/llvm-project/commit/e1acbda158b3f35d7905787f04d8c2f64cfa8c8e
DIFF: https://github.com/llvm/llvm-project/commit/e1acbda158b3f35d7905787f04d8c2f64cfa8c8e.diff
LOG: [AArch64][RISCV] Fix expected smulo/umulo test output
These tests were introduced in D109809, which I pushed on behalf of
@tangxingxin1008. I must not have understood the correct Arcanist
workflow for this and as such may have locally tested a stale build.
This patch fixes the issue by re-running update_llc_test_checks.py on
all four tests.
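For reference, the regeneration step amounts to running the script over
each test file against a freshly built llc. A minimal sketch, assuming
an in-tree build directory named build/ (the directory name is an
assumption; the tests themselves name the exact llc invocations in
their RUN lines):

  # Regenerate the CHECK lines for all four tests with a fresh llc build.
  # build/bin/llc is an assumed path to the just-built compiler.
  llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
    llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll \
    llvm/test/CodeGen/AArch64/sve-umulo-sdnode.ll \
    llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll \
    llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll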
Added:
Modified:
llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll
llvm/test/CodeGen/AArch64/sve-umulo-sdnode.ll
llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll b/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll
index 60d72118426c..82a3059251f9 100644
--- a/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll
+++ b/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll
@@ -86,11 +86,12 @@ define <vscale x 16 x i8> @smulo_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: movprfx z2, z0
-; CHECK-NEXT: smulh z2.b, p0/m, z2.b, z1.b
-; CHECK-NEXT: mul z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: asr z1.b, z0.b, #7
-; CHECK-NEXT: cmpne p0.b, p0/z, z2.b, z1.b
-; CHECK-NEXT: mov z0.b, p0/m, #0 // =0x0
+; CHECK-NEXT: mul z2.b, p0/m, z2.b, z1.b
+; CHECK-NEXT: smulh z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: asr z1.b, z2.b, #7
+; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, z1.b
+; CHECK-NEXT: mov z2.b, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z2.d
; CHECK-NEXT: ret
%a = call { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
%b = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 0
@@ -109,14 +110,15 @@ define <vscale x 32 x i8> @smulo_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i
; CHECK-NEXT: smulh z4.b, p0/m, z4.b, z3.b
; CHECK-NEXT: mul z1.b, p0/m, z1.b, z3.b
; CHECK-NEXT: movprfx z3, z0
-; CHECK-NEXT: smulh z3.b, p0/m, z3.b, z2.b
-; CHECK-NEXT: mul z0.b, p0/m, z0.b, z2.b
-; CHECK-NEXT: asr z2.b, z1.b, #7
-; CHECK-NEXT: cmpne p1.b, p0/z, z4.b, z2.b
-; CHECK-NEXT: asr z2.b, z0.b, #7
-; CHECK-NEXT: cmpne p0.b, p0/z, z3.b, z2.b
-; CHECK-NEXT: mov z0.b, p0/m, #0 // =0x0
+; CHECK-NEXT: mul z3.b, p0/m, z3.b, z2.b
+; CHECK-NEXT: asr z5.b, z1.b, #7
+; CHECK-NEXT: smulh z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT: asr z2.b, z3.b, #7
+; CHECK-NEXT: cmpne p1.b, p0/z, z4.b, z5.b
+; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, z2.b
; CHECK-NEXT: mov z1.b, p1/m, #0 // =0x0
+; CHECK-NEXT: mov z3.b, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z3.d
; CHECK-NEXT: ret
%a = call { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y)
%b = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 0
@@ -135,26 +137,28 @@ define <vscale x 64 x i8> @smulo_nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i
; CHECK-NEXT: smulh z24.b, p0/m, z24.b, z7.b
; CHECK-NEXT: mul z3.b, p0/m, z3.b, z7.b
; CHECK-NEXT: movprfx z7, z2
-; CHECK-NEXT: smulh z7.b, p0/m, z7.b, z6.b
-; CHECK-NEXT: mul z2.b, p0/m, z2.b, z6.b
+; CHECK-NEXT: mul z7.b, p0/m, z7.b, z6.b
+; CHECK-NEXT: smulh z2.b, p0/m, z2.b, z6.b
+; CHECK-NEXT: asr z6.b, z7.b, #7
+; CHECK-NEXT: cmpne p2.b, p0/z, z2.b, z6.b
; CHECK-NEXT: movprfx z6, z1
; CHECK-NEXT: smulh z6.b, p0/m, z6.b, z5.b
; CHECK-NEXT: mul z1.b, p0/m, z1.b, z5.b
-; CHECK-NEXT: movprfx z5, z0
-; CHECK-NEXT: smulh z5.b, p0/m, z5.b, z4.b
-; CHECK-NEXT: mul z0.b, p0/m, z0.b, z4.b
-; CHECK-NEXT: asr z4.b, z3.b, #7
-; CHECK-NEXT: cmpne p1.b, p0/z, z24.b, z4.b
+; CHECK-NEXT: asr z25.b, z3.b, #7
+; CHECK-NEXT: asr z5.b, z1.b, #7
+; CHECK-NEXT: movprfx z2, z0
+; CHECK-NEXT: mul z2.b, p0/m, z2.b, z4.b
+; CHECK-NEXT: smulh z0.b, p0/m, z0.b, z4.b
; CHECK-NEXT: asr z4.b, z2.b, #7
-; CHECK-NEXT: asr z24.b, z1.b, #7
-; CHECK-NEXT: cmpne p2.b, p0/z, z7.b, z4.b
-; CHECK-NEXT: asr z4.b, z0.b, #7
-; CHECK-NEXT: cmpne p3.b, p0/z, z6.b, z24.b
-; CHECK-NEXT: cmpne p0.b, p0/z, z5.b, z4.b
-; CHECK-NEXT: mov z0.b, p0/m, #0 // =0x0
+; CHECK-NEXT: cmpne p1.b, p0/z, z24.b, z25.b
+; CHECK-NEXT: cmpne p3.b, p0/z, z6.b, z5.b
+; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, z4.b
+; CHECK-NEXT: mov z7.b, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z2.b, p0/m, #0 // =0x0
; CHECK-NEXT: mov z1.b, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z2.b, p2/m, #0 // =0x0
; CHECK-NEXT: mov z3.b, p1/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z2.d
+; CHECK-NEXT: mov z2.d, z7.d
; CHECK-NEXT: ret
%a = call { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.smul.with.overflow.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y)
%b = extractvalue { <vscale x 64 x i8>, <vscale x 64 x i1> } %a, 0
@@ -222,11 +226,12 @@ define <vscale x 8 x i16> @smulo_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i1
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: movprfx z2, z0
-; CHECK-NEXT: smulh z2.h, p0/m, z2.h, z1.h
-; CHECK-NEXT: mul z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: asr z1.h, z0.h, #15
-; CHECK-NEXT: cmpne p0.h, p0/z, z2.h, z1.h
-; CHECK-NEXT: mov z0.h, p0/m, #0 // =0x0
+; CHECK-NEXT: mul z2.h, p0/m, z2.h, z1.h
+; CHECK-NEXT: smulh z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: asr z1.h, z2.h, #15
+; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, z1.h
+; CHECK-NEXT: mov z2.h, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z2.d
; CHECK-NEXT: ret
%a = call { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
%b = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 0
@@ -245,14 +250,15 @@ define <vscale x 16 x i16> @smulo_nxv16i16(<vscale x 16 x i16> %x, <vscale x 16
; CHECK-NEXT: smulh z4.h, p0/m, z4.h, z3.h
; CHECK-NEXT: mul z1.h, p0/m, z1.h, z3.h
; CHECK-NEXT: movprfx z3, z0
-; CHECK-NEXT: smulh z3.h, p0/m, z3.h, z2.h
-; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h
-; CHECK-NEXT: asr z2.h, z1.h, #15
-; CHECK-NEXT: cmpne p1.h, p0/z, z4.h, z2.h
-; CHECK-NEXT: asr z2.h, z0.h, #15
-; CHECK-NEXT: cmpne p0.h, p0/z, z3.h, z2.h
-; CHECK-NEXT: mov z0.h, p0/m, #0 // =0x0
+; CHECK-NEXT: mul z3.h, p0/m, z3.h, z2.h
+; CHECK-NEXT: asr z5.h, z1.h, #15
+; CHECK-NEXT: smulh z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT: asr z2.h, z3.h, #15
+; CHECK-NEXT: cmpne p1.h, p0/z, z4.h, z5.h
+; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, z2.h
; CHECK-NEXT: mov z1.h, p1/m, #0 // =0x0
+; CHECK-NEXT: mov z3.h, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z3.d
; CHECK-NEXT: ret
%a = call { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y)
%b = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 0
@@ -271,26 +277,28 @@ define <vscale x 32 x i16> @smulo_nxv32i16(<vscale x 32 x i16> %x, <vscale x 32
; CHECK-NEXT: smulh z24.h, p0/m, z24.h, z7.h
; CHECK-NEXT: mul z3.h, p0/m, z3.h, z7.h
; CHECK-NEXT: movprfx z7, z2
-; CHECK-NEXT: smulh z7.h, p0/m, z7.h, z6.h
-; CHECK-NEXT: mul z2.h, p0/m, z2.h, z6.h
+; CHECK-NEXT: mul z7.h, p0/m, z7.h, z6.h
+; CHECK-NEXT: smulh z2.h, p0/m, z2.h, z6.h
+; CHECK-NEXT: asr z6.h, z7.h, #15
+; CHECK-NEXT: cmpne p2.h, p0/z, z2.h, z6.h
; CHECK-NEXT: movprfx z6, z1
; CHECK-NEXT: smulh z6.h, p0/m, z6.h, z5.h
; CHECK-NEXT: mul z1.h, p0/m, z1.h, z5.h
-; CHECK-NEXT: movprfx z5, z0
-; CHECK-NEXT: smulh z5.h, p0/m, z5.h, z4.h
-; CHECK-NEXT: mul z0.h, p0/m, z0.h, z4.h
-; CHECK-NEXT: asr z4.h, z3.h, #15
-; CHECK-NEXT: cmpne p1.h, p0/z, z24.h, z4.h
+; CHECK-NEXT: asr z25.h, z3.h, #15
+; CHECK-NEXT: asr z5.h, z1.h, #15
+; CHECK-NEXT: movprfx z2, z0
+; CHECK-NEXT: mul z2.h, p0/m, z2.h, z4.h
+; CHECK-NEXT: smulh z0.h, p0/m, z0.h, z4.h
; CHECK-NEXT: asr z4.h, z2.h, #15
-; CHECK-NEXT: asr z24.h, z1.h, #15
-; CHECK-NEXT: cmpne p2.h, p0/z, z7.h, z4.h
-; CHECK-NEXT: asr z4.h, z0.h, #15
-; CHECK-NEXT: cmpne p3.h, p0/z, z6.h, z24.h
-; CHECK-NEXT: cmpne p0.h, p0/z, z5.h, z4.h
-; CHECK-NEXT: mov z0.h, p0/m, #0 // =0x0
+; CHECK-NEXT: cmpne p1.h, p0/z, z24.h, z25.h
+; CHECK-NEXT: cmpne p3.h, p0/z, z6.h, z5.h
+; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, z4.h
+; CHECK-NEXT: mov z7.h, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z2.h, p0/m, #0 // =0x0
; CHECK-NEXT: mov z1.h, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z2.h, p2/m, #0 // =0x0
; CHECK-NEXT: mov z3.h, p1/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z2.d
+; CHECK-NEXT: mov z2.d, z7.d
; CHECK-NEXT: ret
%a = call { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y)
%b = extractvalue { <vscale x 32 x i16>, <vscale x 32 x i1> } %a, 0
@@ -332,11 +340,12 @@ define <vscale x 4 x i32> @smulo_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i3
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: movprfx z2, z0
-; CHECK-NEXT: smulh z2.s, p0/m, z2.s, z1.s
-; CHECK-NEXT: mul z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: asr z1.s, z0.s, #31
-; CHECK-NEXT: cmpne p0.s, p0/z, z2.s, z1.s
-; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0
+; CHECK-NEXT: mul z2.s, p0/m, z2.s, z1.s
+; CHECK-NEXT: smulh z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: asr z1.s, z2.s, #31
+; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, z1.s
+; CHECK-NEXT: mov z2.s, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z2.d
; CHECK-NEXT: ret
%a = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
%b = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 0
@@ -355,14 +364,15 @@ define <vscale x 8 x i32> @smulo_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i3
; CHECK-NEXT: smulh z4.s, p0/m, z4.s, z3.s
; CHECK-NEXT: mul z1.s, p0/m, z1.s, z3.s
; CHECK-NEXT: movprfx z3, z0
-; CHECK-NEXT: smulh z3.s, p0/m, z3.s, z2.s
-; CHECK-NEXT: mul z0.s, p0/m, z0.s, z2.s
-; CHECK-NEXT: asr z2.s, z1.s, #31
-; CHECK-NEXT: cmpne p1.s, p0/z, z4.s, z2.s
-; CHECK-NEXT: asr z2.s, z0.s, #31
-; CHECK-NEXT: cmpne p0.s, p0/z, z3.s, z2.s
-; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0
+; CHECK-NEXT: mul z3.s, p0/m, z3.s, z2.s
+; CHECK-NEXT: asr z5.s, z1.s, #31
+; CHECK-NEXT: smulh z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT: asr z2.s, z3.s, #31
+; CHECK-NEXT: cmpne p1.s, p0/z, z4.s, z5.s
+; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, z2.s
; CHECK-NEXT: mov z1.s, p1/m, #0 // =0x0
+; CHECK-NEXT: mov z3.s, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z3.d
; CHECK-NEXT: ret
%a = call { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y)
%b = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 0
@@ -381,26 +391,28 @@ define <vscale x 16 x i32> @smulo_nxv16i32(<vscale x 16 x i32> %x, <vscale x 16
; CHECK-NEXT: smulh z24.s, p0/m, z24.s, z7.s
; CHECK-NEXT: mul z3.s, p0/m, z3.s, z7.s
; CHECK-NEXT: movprfx z7, z2
-; CHECK-NEXT: smulh z7.s, p0/m, z7.s, z6.s
-; CHECK-NEXT: mul z2.s, p0/m, z2.s, z6.s
+; CHECK-NEXT: mul z7.s, p0/m, z7.s, z6.s
+; CHECK-NEXT: smulh z2.s, p0/m, z2.s, z6.s
+; CHECK-NEXT: asr z6.s, z7.s, #31
+; CHECK-NEXT: cmpne p2.s, p0/z, z2.s, z6.s
; CHECK-NEXT: movprfx z6, z1
; CHECK-NEXT: smulh z6.s, p0/m, z6.s, z5.s
; CHECK-NEXT: mul z1.s, p0/m, z1.s, z5.s
-; CHECK-NEXT: movprfx z5, z0
-; CHECK-NEXT: smulh z5.s, p0/m, z5.s, z4.s
-; CHECK-NEXT: mul z0.s, p0/m, z0.s, z4.s
-; CHECK-NEXT: asr z4.s, z3.s, #31
-; CHECK-NEXT: cmpne p1.s, p0/z, z24.s, z4.s
+; CHECK-NEXT: asr z25.s, z3.s, #31
+; CHECK-NEXT: asr z5.s, z1.s, #31
+; CHECK-NEXT: movprfx z2, z0
+; CHECK-NEXT: mul z2.s, p0/m, z2.s, z4.s
+; CHECK-NEXT: smulh z0.s, p0/m, z0.s, z4.s
; CHECK-NEXT: asr z4.s, z2.s, #31
-; CHECK-NEXT: asr z24.s, z1.s, #31
-; CHECK-NEXT: cmpne p2.s, p0/z, z7.s, z4.s
-; CHECK-NEXT: asr z4.s, z0.s, #31
-; CHECK-NEXT: cmpne p3.s, p0/z, z6.s, z24.s
-; CHECK-NEXT: cmpne p0.s, p0/z, z5.s, z4.s
-; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0
+; CHECK-NEXT: cmpne p1.s, p0/z, z24.s, z25.s
+; CHECK-NEXT: cmpne p3.s, p0/z, z6.s, z5.s
+; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, z4.s
+; CHECK-NEXT: mov z7.s, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z2.s, p0/m, #0 // =0x0
; CHECK-NEXT: mov z1.s, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z2.s, p2/m, #0 // =0x0
; CHECK-NEXT: mov z3.s, p1/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z2.d
+; CHECK-NEXT: mov z2.d, z7.d
; CHECK-NEXT: ret
%a = call { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y)
%b = extractvalue { <vscale x 16 x i32>, <vscale x 16 x i1> } %a, 0
@@ -416,11 +428,12 @@ define <vscale x 2 x i64> @smulo_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i6
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: movprfx z2, z0
-; CHECK-NEXT: smulh z2.d, p0/m, z2.d, z1.d
-; CHECK-NEXT: mul z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: asr z1.d, z0.d, #63
-; CHECK-NEXT: cmpne p0.d, p0/z, z2.d, z1.d
-; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT: mul z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT: smulh z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: asr z1.d, z2.d, #63
+; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, z1.d
+; CHECK-NEXT: mov z2.d, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z2.d
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y)
%b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 0
@@ -439,14 +452,15 @@ define <vscale x 4 x i64> @smulo_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i6
; CHECK-NEXT: smulh z4.d, p0/m, z4.d, z3.d
; CHECK-NEXT: mul z1.d, p0/m, z1.d, z3.d
; CHECK-NEXT: movprfx z3, z0
-; CHECK-NEXT: smulh z3.d, p0/m, z3.d, z2.d
-; CHECK-NEXT: mul z0.d, p0/m, z0.d, z2.d
-; CHECK-NEXT: asr z2.d, z1.d, #63
-; CHECK-NEXT: cmpne p1.d, p0/z, z4.d, z2.d
-; CHECK-NEXT: asr z2.d, z0.d, #63
-; CHECK-NEXT: cmpne p0.d, p0/z, z3.d, z2.d
-; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT: mul z3.d, p0/m, z3.d, z2.d
+; CHECK-NEXT: asr z5.d, z1.d, #63
+; CHECK-NEXT: smulh z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT: asr z2.d, z3.d, #63
+; CHECK-NEXT: cmpne p1.d, p0/z, z4.d, z5.d
+; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, z2.d
; CHECK-NEXT: mov z1.d, p1/m, #0 // =0x0
+; CHECK-NEXT: mov z3.d, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z3.d
; CHECK-NEXT: ret
%a = call { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y)
%b = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 0
@@ -465,26 +479,28 @@ define <vscale x 8 x i64> @smulo_nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i6
; CHECK-NEXT: smulh z24.d, p0/m, z24.d, z7.d
; CHECK-NEXT: mul z3.d, p0/m, z3.d, z7.d
; CHECK-NEXT: movprfx z7, z2
-; CHECK-NEXT: smulh z7.d, p0/m, z7.d, z6.d
-; CHECK-NEXT: mul z2.d, p0/m, z2.d, z6.d
+; CHECK-NEXT: mul z7.d, p0/m, z7.d, z6.d
+; CHECK-NEXT: smulh z2.d, p0/m, z2.d, z6.d
+; CHECK-NEXT: asr z6.d, z7.d, #63
+; CHECK-NEXT: cmpne p2.d, p0/z, z2.d, z6.d
; CHECK-NEXT: movprfx z6, z1
; CHECK-NEXT: smulh z6.d, p0/m, z6.d, z5.d
; CHECK-NEXT: mul z1.d, p0/m, z1.d, z5.d
-; CHECK-NEXT: movprfx z5, z0
-; CHECK-NEXT: smulh z5.d, p0/m, z5.d, z4.d
-; CHECK-NEXT: mul z0.d, p0/m, z0.d, z4.d
-; CHECK-NEXT: asr z4.d, z3.d, #63
-; CHECK-NEXT: cmpne p1.d, p0/z, z24.d, z4.d
+; CHECK-NEXT: asr z25.d, z3.d, #63
+; CHECK-NEXT: asr z5.d, z1.d, #63
+; CHECK-NEXT: movprfx z2, z0
+; CHECK-NEXT: mul z2.d, p0/m, z2.d, z4.d
+; CHECK-NEXT: smulh z0.d, p0/m, z0.d, z4.d
; CHECK-NEXT: asr z4.d, z2.d, #63
-; CHECK-NEXT: asr z24.d, z1.d, #63
-; CHECK-NEXT: cmpne p2.d, p0/z, z7.d, z4.d
-; CHECK-NEXT: asr z4.d, z0.d, #63
-; CHECK-NEXT: cmpne p3.d, p0/z, z6.d, z24.d
-; CHECK-NEXT: cmpne p0.d, p0/z, z5.d, z4.d
-; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT: cmpne p1.d, p0/z, z24.d, z25.d
+; CHECK-NEXT: cmpne p3.d, p0/z, z6.d, z5.d
+; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, z4.d
+; CHECK-NEXT: mov z7.d, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z2.d, p0/m, #0 // =0x0
; CHECK-NEXT: mov z1.d, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z2.d, p2/m, #0 // =0x0
; CHECK-NEXT: mov z3.d, p1/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z2.d
+; CHECK-NEXT: mov z2.d, z7.d
; CHECK-NEXT: ret
%a = call { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y)
%b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i1> } %a, 0
diff --git a/llvm/test/CodeGen/AArch64/sve-umulo-sdnode.ll b/llvm/test/CodeGen/AArch64/sve-umulo-sdnode.ll
index 44eac185a97e..31b9b2a9929e 100644
--- a/llvm/test/CodeGen/AArch64/sve-umulo-sdnode.ll
+++ b/llvm/test/CodeGen/AArch64/sve-umulo-sdnode.ll
@@ -10,13 +10,14 @@ define <vscale x 2 x i8> @umulo_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %
; CHECK-NEXT: and z1.d, z1.d, #0xff
; CHECK-NEXT: and z0.d, z0.d, #0xff
; CHECK-NEXT: movprfx z2, z0
-; CHECK-NEXT: umulh z2.d, p0/m, z2.d, z1.d
-; CHECK-NEXT: mul z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: lsr z1.d, z0.d, #8
-; CHECK-NEXT: cmpne p1.d, p0/z, z2.d, #0
+; CHECK-NEXT: mul z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT: umulh z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: lsr z1.d, z2.d, #8
+; CHECK-NEXT: cmpne p1.d, p0/z, z0.d, #0
; CHECK-NEXT: cmpne p2.d, p0/z, z1.d, #0
; CHECK-NEXT: orr p0.b, p0/z, p2.b, p1.b
-; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z2.d, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z2.d
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
%b = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 0
@@ -34,13 +35,14 @@ define <vscale x 4 x i8> @umulo_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %
; CHECK-NEXT: and z1.s, z1.s, #0xff
; CHECK-NEXT: and z0.s, z0.s, #0xff
; CHECK-NEXT: movprfx z2, z0
-; CHECK-NEXT: umulh z2.s, p0/m, z2.s, z1.s
-; CHECK-NEXT: mul z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: lsr z1.s, z0.s, #8
-; CHECK-NEXT: cmpne p1.s, p0/z, z2.s, #0
+; CHECK-NEXT: mul z2.s, p0/m, z2.s, z1.s
+; CHECK-NEXT: umulh z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: lsr z1.s, z2.s, #8
+; CHECK-NEXT: cmpne p1.s, p0/z, z0.s, #0
; CHECK-NEXT: cmpne p2.s, p0/z, z1.s, #0
; CHECK-NEXT: orr p0.b, p0/z, p2.b, p1.b
-; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z2.s, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z2.d
; CHECK-NEXT: ret
%a = call { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
%b = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 0
@@ -58,13 +60,14 @@ define <vscale x 8 x i8> @umulo_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %
; CHECK-NEXT: and z1.h, z1.h, #0xff
; CHECK-NEXT: and z0.h, z0.h, #0xff
; CHECK-NEXT: movprfx z2, z0
-; CHECK-NEXT: umulh z2.h, p0/m, z2.h, z1.h
-; CHECK-NEXT: mul z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: lsr z1.h, z0.h, #8
-; CHECK-NEXT: cmpne p1.h, p0/z, z2.h, #0
+; CHECK-NEXT: mul z2.h, p0/m, z2.h, z1.h
+; CHECK-NEXT: umulh z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: lsr z1.h, z2.h, #8
+; CHECK-NEXT: cmpne p1.h, p0/z, z0.h, #0
; CHECK-NEXT: cmpne p2.h, p0/z, z1.h, #0
; CHECK-NEXT: orr p0.b, p0/z, p2.b, p1.b
-; CHECK-NEXT: mov z0.h, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z2.h, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z2.d
; CHECK-NEXT: ret
%a = call { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
%b = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 0
@@ -80,11 +83,10 @@ define <vscale x 16 x i8> @umulo_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: movprfx z2, z0
-; CHECK-NEXT: mul z2.b, p0/m, z2.b, z1.b
-; CHECK-NEXT: umulh z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0
-; CHECK-NEXT: mov z2.b, p0/m, #0 // =0x0
-; CHECK-NEXT: mov z0.d, z2.d
+; CHECK-NEXT: umulh z2.b, p0/m, z2.b, z1.b
+; CHECK-NEXT: mul z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: cmpne p0.b, p0/z, z2.b, #0
+; CHECK-NEXT: mov z0.b, p0/m, #0 // =0x0
; CHECK-NEXT: ret
%a = call { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
%b = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 0
@@ -101,15 +103,14 @@ define <vscale x 32 x i8> @umulo_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: movprfx z4, z1
; CHECK-NEXT: mul z4.b, p0/m, z4.b, z3.b
-; CHECK-NEXT: umulh z3.b, p0/m, z3.b, z1.b
-; CHECK-NEXT: movprfx z1, z0
-; CHECK-NEXT: mul z1.b, p0/m, z1.b, z2.b
-; CHECK-NEXT: umulh z0.b, p0/m, z0.b, z2.b
-; CHECK-NEXT: cmpne p1.b, p0/z, z3.b, #0
-; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0
-; CHECK-NEXT: mov z1.b, p0/m, #0 // =0x0
+; CHECK-NEXT: umulh z1.b, p0/m, z1.b, z3.b
+; CHECK-NEXT: movprfx z3, z0
+; CHECK-NEXT: umulh z3.b, p0/m, z3.b, z2.b
+; CHECK-NEXT: cmpne p1.b, p0/z, z1.b, #0
+; CHECK-NEXT: mul z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT: cmpne p0.b, p0/z, z3.b, #0
; CHECK-NEXT: mov z4.b, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z0.d, z1.d
+; CHECK-NEXT: mov z0.b, p0/m, #0 // =0x0
; CHECK-NEXT: mov z1.d, z4.d
; CHECK-NEXT: ret
%a = call { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y)
@@ -127,27 +128,25 @@ define <vscale x 64 x i8> @umulo_nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: movprfx z24, z3
; CHECK-NEXT: mul z24.b, p0/m, z24.b, z7.b
-; CHECK-NEXT: umulh z7.b, p0/m, z7.b, z3.b
+; CHECK-NEXT: umulh z3.b, p0/m, z3.b, z7.b
+; CHECK-NEXT: cmpne p1.b, p0/z, z3.b, #0
; CHECK-NEXT: movprfx z3, z2
-; CHECK-NEXT: mul z3.b, p0/m, z3.b, z6.b
-; CHECK-NEXT: umulh z6.b, p0/m, z6.b, z2.b
-; CHECK-NEXT: movprfx z2, z1
-; CHECK-NEXT: mul z2.b, p0/m, z2.b, z5.b
-; CHECK-NEXT: umulh z5.b, p0/m, z5.b, z1.b
+; CHECK-NEXT: umulh z3.b, p0/m, z3.b, z6.b
+; CHECK-NEXT: cmpne p2.b, p0/z, z3.b, #0
+; CHECK-NEXT: movprfx z3, z1
+; CHECK-NEXT: mul z3.b, p0/m, z3.b, z5.b
+; CHECK-NEXT: umulh z1.b, p0/m, z1.b, z5.b
+; CHECK-NEXT: mul z2.b, p0/m, z2.b, z6.b
+; CHECK-NEXT: cmpne p3.b, p0/z, z1.b, #0
; CHECK-NEXT: movprfx z1, z0
-; CHECK-NEXT: mul z1.b, p0/m, z1.b, z4.b
-; CHECK-NEXT: umulh z0.b, p0/m, z0.b, z4.b
-; CHECK-NEXT: cmpne p1.b, p0/z, z7.b, #0
-; CHECK-NEXT: cmpne p2.b, p0/z, z6.b, #0
-; CHECK-NEXT: cmpne p3.b, p0/z, z5.b, #0
-; CHECK-NEXT: cmpne p0.b, p0/z, z0.b, #0
-; CHECK-NEXT: mov z1.b, p0/m, #0 // =0x0
-; CHECK-NEXT: mov z2.b, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z3.b, p2/m, #0 // =0x0
+; CHECK-NEXT: umulh z1.b, p0/m, z1.b, z4.b
+; CHECK-NEXT: mul z0.b, p0/m, z0.b, z4.b
+; CHECK-NEXT: cmpne p0.b, p0/z, z1.b, #0
+; CHECK-NEXT: mov z3.b, p3/m, #0 // =0x0
; CHECK-NEXT: mov z24.b, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z0.d, z1.d
-; CHECK-NEXT: mov z1.d, z2.d
-; CHECK-NEXT: mov z2.d, z3.d
+; CHECK-NEXT: mov z0.b, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z2.b, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z1.d, z3.d
; CHECK-NEXT: mov z3.d, z24.d
; CHECK-NEXT: ret
%a = call { <vscale x 64 x i8>, <vscale x 64 x i1> } @llvm.umul.with.overflow.nxv64i8(<vscale x 64 x i8> %x, <vscale x 64 x i8> %y)
@@ -166,13 +165,14 @@ define <vscale x 2 x i16> @umulo_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i1
; CHECK-NEXT: and z1.d, z1.d, #0xffff
; CHECK-NEXT: and z0.d, z0.d, #0xffff
; CHECK-NEXT: movprfx z2, z0
-; CHECK-NEXT: umulh z2.d, p0/m, z2.d, z1.d
-; CHECK-NEXT: mul z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: lsr z1.d, z0.d, #16
-; CHECK-NEXT: cmpne p1.d, p0/z, z2.d, #0
+; CHECK-NEXT: mul z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT: umulh z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: lsr z1.d, z2.d, #16
+; CHECK-NEXT: cmpne p1.d, p0/z, z0.d, #0
; CHECK-NEXT: cmpne p2.d, p0/z, z1.d, #0
; CHECK-NEXT: orr p0.b, p0/z, p2.b, p1.b
-; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z2.d, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z2.d
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
%b = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 0
@@ -190,13 +190,14 @@ define <vscale x 4 x i16> @umulo_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i1
; CHECK-NEXT: and z1.s, z1.s, #0xffff
; CHECK-NEXT: and z0.s, z0.s, #0xffff
; CHECK-NEXT: movprfx z2, z0
-; CHECK-NEXT: umulh z2.s, p0/m, z2.s, z1.s
-; CHECK-NEXT: mul z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: lsr z1.s, z0.s, #16
-; CHECK-NEXT: cmpne p1.s, p0/z, z2.s, #0
+; CHECK-NEXT: mul z2.s, p0/m, z2.s, z1.s
+; CHECK-NEXT: umulh z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: lsr z1.s, z2.s, #16
+; CHECK-NEXT: cmpne p1.s, p0/z, z0.s, #0
; CHECK-NEXT: cmpne p2.s, p0/z, z1.s, #0
; CHECK-NEXT: orr p0.b, p0/z, p2.b, p1.b
-; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z2.s, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z2.d
; CHECK-NEXT: ret
%a = call { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
%b = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 0
@@ -212,11 +213,10 @@ define <vscale x 8 x i16> @umulo_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i1
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: movprfx z2, z0
-; CHECK-NEXT: mul z2.h, p0/m, z2.h, z1.h
-; CHECK-NEXT: umulh z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0
-; CHECK-NEXT: mov z2.h, p0/m, #0 // =0x0
-; CHECK-NEXT: mov z0.d, z2.d
+; CHECK-NEXT: umulh z2.h, p0/m, z2.h, z1.h
+; CHECK-NEXT: mul z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: cmpne p0.h, p0/z, z2.h, #0
+; CHECK-NEXT: mov z0.h, p0/m, #0 // =0x0
; CHECK-NEXT: ret
%a = call { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
%b = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 0
@@ -233,15 +233,14 @@ define <vscale x 16 x i16> @umulo_nxv16i16(<vscale x 16 x i16> %x, <vscale x 16
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: movprfx z4, z1
; CHECK-NEXT: mul z4.h, p0/m, z4.h, z3.h
-; CHECK-NEXT: umulh z3.h, p0/m, z3.h, z1.h
-; CHECK-NEXT: movprfx z1, z0
-; CHECK-NEXT: mul z1.h, p0/m, z1.h, z2.h
-; CHECK-NEXT: umulh z0.h, p0/m, z0.h, z2.h
-; CHECK-NEXT: cmpne p1.h, p0/z, z3.h, #0
-; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0
-; CHECK-NEXT: mov z1.h, p0/m, #0 // =0x0
+; CHECK-NEXT: umulh z1.h, p0/m, z1.h, z3.h
+; CHECK-NEXT: movprfx z3, z0
+; CHECK-NEXT: umulh z3.h, p0/m, z3.h, z2.h
+; CHECK-NEXT: cmpne p1.h, p0/z, z1.h, #0
+; CHECK-NEXT: mul z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT: cmpne p0.h, p0/z, z3.h, #0
; CHECK-NEXT: mov z4.h, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z0.d, z1.d
+; CHECK-NEXT: mov z0.h, p0/m, #0 // =0x0
; CHECK-NEXT: mov z1.d, z4.d
; CHECK-NEXT: ret
%a = call { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y)
@@ -259,27 +258,25 @@ define <vscale x 32 x i16> @umulo_nxv32i16(<vscale x 32 x i16> %x, <vscale x 32
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: movprfx z24, z3
; CHECK-NEXT: mul z24.h, p0/m, z24.h, z7.h
-; CHECK-NEXT: umulh z7.h, p0/m, z7.h, z3.h
+; CHECK-NEXT: umulh z3.h, p0/m, z3.h, z7.h
+; CHECK-NEXT: cmpne p1.h, p0/z, z3.h, #0
; CHECK-NEXT: movprfx z3, z2
-; CHECK-NEXT: mul z3.h, p0/m, z3.h, z6.h
-; CHECK-NEXT: umulh z6.h, p0/m, z6.h, z2.h
-; CHECK-NEXT: movprfx z2, z1
-; CHECK-NEXT: mul z2.h, p0/m, z2.h, z5.h
-; CHECK-NEXT: umulh z5.h, p0/m, z5.h, z1.h
+; CHECK-NEXT: umulh z3.h, p0/m, z3.h, z6.h
+; CHECK-NEXT: cmpne p2.h, p0/z, z3.h, #0
+; CHECK-NEXT: movprfx z3, z1
+; CHECK-NEXT: mul z3.h, p0/m, z3.h, z5.h
+; CHECK-NEXT: umulh z1.h, p0/m, z1.h, z5.h
+; CHECK-NEXT: mul z2.h, p0/m, z2.h, z6.h
+; CHECK-NEXT: cmpne p3.h, p0/z, z1.h, #0
; CHECK-NEXT: movprfx z1, z0
-; CHECK-NEXT: mul z1.h, p0/m, z1.h, z4.h
-; CHECK-NEXT: umulh z0.h, p0/m, z0.h, z4.h
-; CHECK-NEXT: cmpne p1.h, p0/z, z7.h, #0
-; CHECK-NEXT: cmpne p2.h, p0/z, z6.h, #0
-; CHECK-NEXT: cmpne p3.h, p0/z, z5.h, #0
-; CHECK-NEXT: cmpne p0.h, p0/z, z0.h, #0
-; CHECK-NEXT: mov z1.h, p0/m, #0 // =0x0
-; CHECK-NEXT: mov z2.h, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z3.h, p2/m, #0 // =0x0
+; CHECK-NEXT: umulh z1.h, p0/m, z1.h, z4.h
+; CHECK-NEXT: mul z0.h, p0/m, z0.h, z4.h
+; CHECK-NEXT: cmpne p0.h, p0/z, z1.h, #0
+; CHECK-NEXT: mov z3.h, p3/m, #0 // =0x0
; CHECK-NEXT: mov z24.h, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z0.d, z1.d
-; CHECK-NEXT: mov z1.d, z2.d
-; CHECK-NEXT: mov z2.d, z3.d
+; CHECK-NEXT: mov z0.h, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z2.h, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z1.d, z3.d
; CHECK-NEXT: mov z3.d, z24.d
; CHECK-NEXT: ret
%a = call { <vscale x 32 x i16>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i16(<vscale x 32 x i16> %x, <vscale x 32 x i16> %y)
@@ -298,13 +295,14 @@ define <vscale x 2 x i32> @umulo_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i3
; CHECK-NEXT: and z1.d, z1.d, #0xffffffff
; CHECK-NEXT: and z0.d, z0.d, #0xffffffff
; CHECK-NEXT: movprfx z2, z0
-; CHECK-NEXT: umulh z2.d, p0/m, z2.d, z1.d
-; CHECK-NEXT: mul z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: lsr z1.d, z0.d, #32
-; CHECK-NEXT: cmpne p1.d, p0/z, z2.d, #0
+; CHECK-NEXT: mul z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT: umulh z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: lsr z1.d, z2.d, #32
+; CHECK-NEXT: cmpne p1.d, p0/z, z0.d, #0
; CHECK-NEXT: cmpne p2.d, p0/z, z1.d, #0
; CHECK-NEXT: orr p0.b, p0/z, p2.b, p1.b
-; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z2.d, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z0.d, z2.d
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
%b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0
@@ -320,11 +318,10 @@ define <vscale x 4 x i32> @umulo_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i3
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: movprfx z2, z0
-; CHECK-NEXT: mul z2.s, p0/m, z2.s, z1.s
-; CHECK-NEXT: umulh z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0
-; CHECK-NEXT: mov z2.s, p0/m, #0 // =0x0
-; CHECK-NEXT: mov z0.d, z2.d
+; CHECK-NEXT: umulh z2.s, p0/m, z2.s, z1.s
+; CHECK-NEXT: mul z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: cmpne p0.s, p0/z, z2.s, #0
+; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0
; CHECK-NEXT: ret
%a = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
%b = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 0
@@ -341,15 +338,14 @@ define <vscale x 8 x i32> @umulo_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i3
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: movprfx z4, z1
; CHECK-NEXT: mul z4.s, p0/m, z4.s, z3.s
-; CHECK-NEXT: umulh z3.s, p0/m, z3.s, z1.s
-; CHECK-NEXT: movprfx z1, z0
-; CHECK-NEXT: mul z1.s, p0/m, z1.s, z2.s
-; CHECK-NEXT: umulh z0.s, p0/m, z0.s, z2.s
-; CHECK-NEXT: cmpne p1.s, p0/z, z3.s, #0
-; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0
-; CHECK-NEXT: mov z1.s, p0/m, #0 // =0x0
+; CHECK-NEXT: umulh z1.s, p0/m, z1.s, z3.s
+; CHECK-NEXT: movprfx z3, z0
+; CHECK-NEXT: umulh z3.s, p0/m, z3.s, z2.s
+; CHECK-NEXT: cmpne p1.s, p0/z, z1.s, #0
+; CHECK-NEXT: mul z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT: cmpne p0.s, p0/z, z3.s, #0
; CHECK-NEXT: mov z4.s, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z0.d, z1.d
+; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0
; CHECK-NEXT: mov z1.d, z4.d
; CHECK-NEXT: ret
%a = call { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y)
@@ -367,27 +363,25 @@ define <vscale x 16 x i32> @umulo_nxv16i32(<vscale x 16 x i32> %x, <vscale x 16
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: movprfx z24, z3
; CHECK-NEXT: mul z24.s, p0/m, z24.s, z7.s
-; CHECK-NEXT: umulh z7.s, p0/m, z7.s, z3.s
+; CHECK-NEXT: umulh z3.s, p0/m, z3.s, z7.s
+; CHECK-NEXT: cmpne p1.s, p0/z, z3.s, #0
; CHECK-NEXT: movprfx z3, z2
-; CHECK-NEXT: mul z3.s, p0/m, z3.s, z6.s
-; CHECK-NEXT: umulh z6.s, p0/m, z6.s, z2.s
-; CHECK-NEXT: movprfx z2, z1
-; CHECK-NEXT: mul z2.s, p0/m, z2.s, z5.s
-; CHECK-NEXT: umulh z5.s, p0/m, z5.s, z1.s
+; CHECK-NEXT: umulh z3.s, p0/m, z3.s, z6.s
+; CHECK-NEXT: cmpne p2.s, p0/z, z3.s, #0
+; CHECK-NEXT: movprfx z3, z1
+; CHECK-NEXT: mul z3.s, p0/m, z3.s, z5.s
+; CHECK-NEXT: umulh z1.s, p0/m, z1.s, z5.s
+; CHECK-NEXT: mul z2.s, p0/m, z2.s, z6.s
+; CHECK-NEXT: cmpne p3.s, p0/z, z1.s, #0
; CHECK-NEXT: movprfx z1, z0
-; CHECK-NEXT: mul z1.s, p0/m, z1.s, z4.s
-; CHECK-NEXT: umulh z0.s, p0/m, z0.s, z4.s
-; CHECK-NEXT: cmpne p1.s, p0/z, z7.s, #0
-; CHECK-NEXT: cmpne p2.s, p0/z, z6.s, #0
-; CHECK-NEXT: cmpne p3.s, p0/z, z5.s, #0
-; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, #0
-; CHECK-NEXT: mov z1.s, p0/m, #0 // =0x0
-; CHECK-NEXT: mov z2.s, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z3.s, p2/m, #0 // =0x0
+; CHECK-NEXT: umulh z1.s, p0/m, z1.s, z4.s
+; CHECK-NEXT: mul z0.s, p0/m, z0.s, z4.s
+; CHECK-NEXT: cmpne p0.s, p0/z, z1.s, #0
+; CHECK-NEXT: mov z3.s, p3/m, #0 // =0x0
; CHECK-NEXT: mov z24.s, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z0.d, z1.d
-; CHECK-NEXT: mov z1.d, z2.d
-; CHECK-NEXT: mov z2.d, z3.d
+; CHECK-NEXT: mov z0.s, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z2.s, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z1.d, z3.d
; CHECK-NEXT: mov z3.d, z24.d
; CHECK-NEXT: ret
%a = call { <vscale x 16 x i32>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i32(<vscale x 16 x i32> %x, <vscale x 16 x i32> %y)
@@ -404,11 +398,10 @@ define <vscale x 2 x i64> @umulo_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i6
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: movprfx z2, z0
-; CHECK-NEXT: mul z2.d, p0/m, z2.d, z1.d
-; CHECK-NEXT: umulh z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0
-; CHECK-NEXT: mov z2.d, p0/m, #0 // =0x0
-; CHECK-NEXT: mov z0.d, z2.d
+; CHECK-NEXT: umulh z2.d, p0/m, z2.d, z1.d
+; CHECK-NEXT: mul z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: cmpne p0.d, p0/z, z2.d, #0
+; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y)
%b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 0
@@ -425,15 +418,14 @@ define <vscale x 4 x i64> @umulo_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i6
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: movprfx z4, z1
; CHECK-NEXT: mul z4.d, p0/m, z4.d, z3.d
-; CHECK-NEXT: umulh z3.d, p0/m, z3.d, z1.d
-; CHECK-NEXT: movprfx z1, z0
-; CHECK-NEXT: mul z1.d, p0/m, z1.d, z2.d
-; CHECK-NEXT: umulh z0.d, p0/m, z0.d, z2.d
-; CHECK-NEXT: cmpne p1.d, p0/z, z3.d, #0
-; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0
-; CHECK-NEXT: mov z1.d, p0/m, #0 // =0x0
+; CHECK-NEXT: umulh z1.d, p0/m, z1.d, z3.d
+; CHECK-NEXT: movprfx z3, z0
+; CHECK-NEXT: umulh z3.d, p0/m, z3.d, z2.d
+; CHECK-NEXT: cmpne p1.d, p0/z, z1.d, #0
+; CHECK-NEXT: mul z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT: cmpne p0.d, p0/z, z3.d, #0
; CHECK-NEXT: mov z4.d, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z0.d, z1.d
+; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
; CHECK-NEXT: mov z1.d, z4.d
; CHECK-NEXT: ret
%a = call { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y)
@@ -451,27 +443,25 @@ define <vscale x 8 x i64> @umulo_nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i6
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: movprfx z24, z3
; CHECK-NEXT: mul z24.d, p0/m, z24.d, z7.d
-; CHECK-NEXT: umulh z7.d, p0/m, z7.d, z3.d
+; CHECK-NEXT: umulh z3.d, p0/m, z3.d, z7.d
+; CHECK-NEXT: cmpne p1.d, p0/z, z3.d, #0
; CHECK-NEXT: movprfx z3, z2
-; CHECK-NEXT: mul z3.d, p0/m, z3.d, z6.d
-; CHECK-NEXT: umulh z6.d, p0/m, z6.d, z2.d
-; CHECK-NEXT: movprfx z2, z1
-; CHECK-NEXT: mul z2.d, p0/m, z2.d, z5.d
-; CHECK-NEXT: umulh z5.d, p0/m, z5.d, z1.d
+; CHECK-NEXT: umulh z3.d, p0/m, z3.d, z6.d
+; CHECK-NEXT: cmpne p2.d, p0/z, z3.d, #0
+; CHECK-NEXT: movprfx z3, z1
+; CHECK-NEXT: mul z3.d, p0/m, z3.d, z5.d
+; CHECK-NEXT: umulh z1.d, p0/m, z1.d, z5.d
+; CHECK-NEXT: mul z2.d, p0/m, z2.d, z6.d
+; CHECK-NEXT: cmpne p3.d, p0/z, z1.d, #0
; CHECK-NEXT: movprfx z1, z0
-; CHECK-NEXT: mul z1.d, p0/m, z1.d, z4.d
-; CHECK-NEXT: umulh z0.d, p0/m, z0.d, z4.d
-; CHECK-NEXT: cmpne p1.d, p0/z, z7.d, #0
-; CHECK-NEXT: cmpne p2.d, p0/z, z6.d, #0
-; CHECK-NEXT: cmpne p3.d, p0/z, z5.d, #0
-; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, #0
-; CHECK-NEXT: mov z1.d, p0/m, #0 // =0x0
-; CHECK-NEXT: mov z2.d, p3/m, #0 // =0x0
-; CHECK-NEXT: mov z3.d, p2/m, #0 // =0x0
+; CHECK-NEXT: umulh z1.d, p0/m, z1.d, z4.d
+; CHECK-NEXT: mul z0.d, p0/m, z0.d, z4.d
+; CHECK-NEXT: cmpne p0.d, p0/z, z1.d, #0
+; CHECK-NEXT: mov z3.d, p3/m, #0 // =0x0
; CHECK-NEXT: mov z24.d, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z0.d, z1.d
-; CHECK-NEXT: mov z1.d, z2.d
-; CHECK-NEXT: mov z2.d, z3.d
+; CHECK-NEXT: mov z0.d, p0/m, #0 // =0x0
+; CHECK-NEXT: mov z2.d, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z1.d, z3.d
; CHECK-NEXT: mov z3.d, z24.d
; CHECK-NEXT: ret
%a = call { <vscale x 8 x i64>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i64(<vscale x 8 x i64> %x, <vscale x 8 x i64> %y)
diff --git a/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
index cbdd09ff3739..95cb0b391ec5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/smulo-sdnode.ll
@@ -7,11 +7,11 @@ define <vscale x 1 x i8> @smulo_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %
; CHECK-LABEL: smulo_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 7
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 7
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y)
%b = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 0
@@ -26,11 +26,11 @@ define <vscale x 2 x i8> @smulo_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %
; CHECK-LABEL: smulo_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 7
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 7
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
%b = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 0
@@ -45,11 +45,11 @@ define <vscale x 4 x i8> @smulo_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %
; CHECK-LABEL: smulo_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 7
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 7
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
%b = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 0
@@ -64,11 +64,11 @@ define <vscale x 8 x i8> @smulo_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %
; CHECK-LABEL: smulo_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 7
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 7
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
%b = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 0
@@ -83,11 +83,11 @@ define <vscale x 16 x i8> @smulo_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i
; CHECK-LABEL: smulo_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vmulh.vv v26, v8, v10
-; CHECK-NEXT: vmul.vv v28, v8, v10
-; CHECK-NEXT: vsra.vi v30, v28, 7
-; CHECK-NEXT: vmsne.vv v0, v26, v30
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vmulh.vv v12, v8, v10
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vsra.vi v10, v8, 7
+; CHECK-NEXT: vmsne.vv v0, v12, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
%b = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 0
@@ -102,10 +102,10 @@ define <vscale x 32 x i8> @smulo_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i
; CHECK-LABEL: smulo_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
-; CHECK-NEXT: vmulh.vv v28, v8, v12
+; CHECK-NEXT: vmulh.vv v16, v8, v12
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vsra.vi v12, v8, 7
-; CHECK-NEXT: vmsne.vv v0, v28, v12
+; CHECK-NEXT: vmsne.vv v0, v16, v12
; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.smul.with.overflow.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y)
@@ -140,11 +140,11 @@ define <vscale x 1 x i16> @smulo_nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i1
; CHECK-LABEL: smulo_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 15
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 15
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y)
%b = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 0
@@ -159,11 +159,11 @@ define <vscale x 2 x i16> @smulo_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i1
; CHECK-LABEL: smulo_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 15
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 15
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
%b = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 0
@@ -178,11 +178,11 @@ define <vscale x 4 x i16> @smulo_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i1
; CHECK-LABEL: smulo_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 15
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 15
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
%b = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 0
@@ -197,11 +197,11 @@ define <vscale x 8 x i16> @smulo_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i1
; CHECK-LABEL: smulo_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT: vmulh.vv v26, v8, v10
-; CHECK-NEXT: vmul.vv v28, v8, v10
-; CHECK-NEXT: vsra.vi v30, v28, 15
-; CHECK-NEXT: vmsne.vv v0, v26, v30
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vmulh.vv v12, v8, v10
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vsra.vi v10, v8, 15
+; CHECK-NEXT: vmsne.vv v0, v12, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
%b = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 0
@@ -216,10 +216,10 @@ define <vscale x 16 x i16> @smulo_nxv16i16(<vscale x 16 x i16> %x, <vscale x 16
; CHECK-LABEL: smulo_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT: vmulh.vv v28, v8, v12
+; CHECK-NEXT: vmulh.vv v16, v8, v12
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vsra.vi v12, v8, 15
-; CHECK-NEXT: vmsne.vv v0, v28, v12
+; CHECK-NEXT: vmsne.vv v0, v16, v12
; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.smul.with.overflow.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y)
@@ -254,11 +254,11 @@ define <vscale x 1 x i32> @smulo_nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i3
; CHECK-LABEL: smulo_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 31
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 31
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y)
%b = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 0
@@ -273,11 +273,11 @@ define <vscale x 2 x i32> @smulo_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i3
; CHECK-LABEL: smulo_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
-; CHECK-NEXT: vsra.vi v27, v26, 31
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vsra.vi v9, v8, 31
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
%b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0
@@ -292,11 +292,11 @@ define <vscale x 4 x i32> @smulo_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i3
; CHECK-LABEL: smulo_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vmulh.vv v26, v8, v10
-; CHECK-NEXT: vmul.vv v28, v8, v10
-; CHECK-NEXT: vsra.vi v30, v28, 31
-; CHECK-NEXT: vmsne.vv v0, v26, v30
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vmulh.vv v12, v8, v10
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vsra.vi v10, v8, 31
+; CHECK-NEXT: vmsne.vv v0, v12, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
%b = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 0
@@ -311,10 +311,10 @@ define <vscale x 8 x i32> @smulo_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i3
; CHECK-LABEL: smulo_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vmulh.vv v28, v8, v12
+; CHECK-NEXT: vmulh.vv v16, v8, v12
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: vsra.vi v12, v8, 31
-; CHECK-NEXT: vmsne.vv v0, v28, v12
+; CHECK-NEXT: vmsne.vv v0, v16, v12
; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.smul.with.overflow.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y)
@@ -349,12 +349,12 @@ define <vscale x 1 x i64> @smulo_nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i6
; CHECK-LABEL: smulo_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmulh.vv v25, v8, v9
-; CHECK-NEXT: vmul.vv v26, v8, v9
+; CHECK-NEXT: vmulh.vv v10, v8, v9
+; CHECK-NEXT: vmul.vv v8, v8, v9
; CHECK-NEXT: addi a0, zero, 63
-; CHECK-NEXT: vsra.vx v27, v26, a0
-; CHECK-NEXT: vmsne.vv v0, v25, v27
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vsra.vx v9, v8, a0
+; CHECK-NEXT: vmsne.vv v0, v10, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.smul.with.overflow.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y)
%b = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 0
@@ -369,12 +369,12 @@ define <vscale x 2 x i64> @smulo_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i6
; CHECK-LABEL: smulo_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmulh.vv v26, v8, v10
-; CHECK-NEXT: vmul.vv v28, v8, v10
+; CHECK-NEXT: vmulh.vv v12, v8, v10
+; CHECK-NEXT: vmul.vv v8, v8, v10
; CHECK-NEXT: addi a0, zero, 63
-; CHECK-NEXT: vsra.vx v30, v28, a0
-; CHECK-NEXT: vmsne.vv v0, v26, v30
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vsra.vx v10, v8, a0
+; CHECK-NEXT: vmsne.vv v0, v12, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.smul.with.overflow.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y)
%b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 0
@@ -389,11 +389,11 @@ define <vscale x 4 x i64> @smulo_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i6
; CHECK-LABEL: smulo_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmulh.vv v28, v8, v12
+; CHECK-NEXT: vmulh.vv v16, v8, v12
; CHECK-NEXT: vmul.vv v8, v8, v12
; CHECK-NEXT: addi a0, zero, 63
; CHECK-NEXT: vsra.vx v12, v8, a0
-; CHECK-NEXT: vmsne.vv v0, v28, v12
+; CHECK-NEXT: vmsne.vv v0, v16, v12
; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.smul.with.overflow.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y)
diff --git a/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll
index 1d100ed7d2fc..ddd9ffbd02de 100644
--- a/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/umulo-sdnode.ll
@@ -7,10 +7,10 @@ define <vscale x 1 x i8> @umulo_nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %
; CHECK-LABEL: umulo_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 1 x i8>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i8(<vscale x 1 x i8> %x, <vscale x 1 x i8> %y)
%b = extractvalue { <vscale x 1 x i8>, <vscale x 1 x i1> } %a, 0
@@ -25,10 +25,10 @@ define <vscale x 2 x i8> @umulo_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %
; CHECK-LABEL: umulo_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i8>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
%b = extractvalue { <vscale x 2 x i8>, <vscale x 2 x i1> } %a, 0
@@ -43,10 +43,10 @@ define <vscale x 4 x i8> @umulo_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %
; CHECK-LABEL: umulo_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 4 x i8>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
%b = extractvalue { <vscale x 4 x i8>, <vscale x 4 x i1> } %a, 0
@@ -61,10 +61,10 @@ define <vscale x 8 x i8> @umulo_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %
; CHECK-LABEL: umulo_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 8 x i8>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
%b = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i1> } %a, 0
@@ -79,10 +79,10 @@ define <vscale x 16 x i8> @umulo_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i
; CHECK-LABEL: umulo_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vmulhu.vv v26, v8, v10
-; CHECK-NEXT: vmsne.vi v0, v26, 0
-; CHECK-NEXT: vmul.vv v26, v8, v10
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulhu.vv v12, v8, v10
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 16 x i8>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
%b = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i1> } %a, 0
@@ -97,10 +97,10 @@ define <vscale x 32 x i8> @umulo_nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i
; CHECK-LABEL: umulo_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
-; CHECK-NEXT: vmulhu.vv v28, v8, v12
-; CHECK-NEXT: vmsne.vi v0, v28, 0
-; CHECK-NEXT: vmul.vv v28, v8, v12
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vmulhu.vv v16, v8, v12
+; CHECK-NEXT: vmsne.vi v0, v16, 0
+; CHECK-NEXT: vmul.vv v8, v8, v12
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 32 x i8>, <vscale x 32 x i1> } @llvm.umul.with.overflow.nxv32i8(<vscale x 32 x i8> %x, <vscale x 32 x i8> %y)
%b = extractvalue { <vscale x 32 x i8>, <vscale x 32 x i1> } %a, 0
@@ -133,10 +133,10 @@ define <vscale x 1 x i16> @umulo_nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i1
; CHECK-LABEL: umulo_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 1 x i16>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i16(<vscale x 1 x i16> %x, <vscale x 1 x i16> %y)
%b = extractvalue { <vscale x 1 x i16>, <vscale x 1 x i1> } %a, 0
@@ -151,10 +151,10 @@ define <vscale x 2 x i16> @umulo_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i1
; CHECK-LABEL: umulo_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i16>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
%b = extractvalue { <vscale x 2 x i16>, <vscale x 2 x i1> } %a, 0
@@ -169,10 +169,10 @@ define <vscale x 4 x i16> @umulo_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i1
; CHECK-LABEL: umulo_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 4 x i16>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
%b = extractvalue { <vscale x 4 x i16>, <vscale x 4 x i1> } %a, 0
@@ -187,10 +187,10 @@ define <vscale x 8 x i16> @umulo_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i1
; CHECK-LABEL: umulo_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT: vmulhu.vv v26, v8, v10
-; CHECK-NEXT: vmsne.vi v0, v26, 0
-; CHECK-NEXT: vmul.vv v26, v8, v10
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulhu.vv v12, v8, v10
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 8 x i16>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
%b = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i1> } %a, 0
@@ -205,10 +205,10 @@ define <vscale x 16 x i16> @umulo_nxv16i16(<vscale x 16 x i16> %x, <vscale x 16
; CHECK-LABEL: umulo_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT: vmulhu.vv v28, v8, v12
-; CHECK-NEXT: vmsne.vi v0, v28, 0
-; CHECK-NEXT: vmul.vv v28, v8, v12
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vmulhu.vv v16, v8, v12
+; CHECK-NEXT: vmsne.vi v0, v16, 0
+; CHECK-NEXT: vmul.vv v8, v8, v12
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 16 x i16>, <vscale x 16 x i1> } @llvm.umul.with.overflow.nxv16i16(<vscale x 16 x i16> %x, <vscale x 16 x i16> %y)
%b = extractvalue { <vscale x 16 x i16>, <vscale x 16 x i1> } %a, 0
@@ -241,10 +241,10 @@ define <vscale x 1 x i32> @umulo_nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i3
; CHECK-LABEL: umulo_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 1 x i32>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i32(<vscale x 1 x i32> %x, <vscale x 1 x i32> %y)
%b = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i1> } %a, 0
@@ -259,10 +259,10 @@ define <vscale x 2 x i32> @umulo_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i3
; CHECK-LABEL: umulo_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
%b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0
@@ -277,10 +277,10 @@ define <vscale x 4 x i32> @umulo_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i3
; CHECK-LABEL: umulo_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vmulhu.vv v26, v8, v10
-; CHECK-NEXT: vmsne.vi v0, v26, 0
-; CHECK-NEXT: vmul.vv v26, v8, v10
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulhu.vv v12, v8, v10
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
%b = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } %a, 0
@@ -295,10 +295,10 @@ define <vscale x 8 x i32> @umulo_nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i3
; CHECK-LABEL: umulo_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vmulhu.vv v28, v8, v12
-; CHECK-NEXT: vmsne.vi v0, v28, 0
-; CHECK-NEXT: vmul.vv v28, v8, v12
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vmulhu.vv v16, v8, v12
+; CHECK-NEXT: vmsne.vi v0, v16, 0
+; CHECK-NEXT: vmul.vv v8, v8, v12
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 8 x i32>, <vscale x 8 x i1> } @llvm.umul.with.overflow.nxv8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y)
%b = extractvalue { <vscale x 8 x i32>, <vscale x 8 x i1> } %a, 0
@@ -331,10 +331,10 @@ define <vscale x 1 x i64> @umulo_nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i6
; CHECK-LABEL: umulo_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmulhu.vv v25, v8, v9
-; CHECK-NEXT: vmsne.vi v0, v25, 0
-; CHECK-NEXT: vmul.vv v25, v8, v9
-; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
+; CHECK-NEXT: vmulhu.vv v10, v8, v9
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmul.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 1 x i64>, <vscale x 1 x i1> } @llvm.umul.with.overflow.nxv1i64(<vscale x 1 x i64> %x, <vscale x 1 x i64> %y)
%b = extractvalue { <vscale x 1 x i64>, <vscale x 1 x i1> } %a, 0
@@ -349,10 +349,10 @@ define <vscale x 2 x i64> @umulo_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i6
; CHECK-LABEL: umulo_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmulhu.vv v26, v8, v10
-; CHECK-NEXT: vmsne.vi v0, v26, 0
-; CHECK-NEXT: vmul.vv v26, v8, v10
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmulhu.vv v12, v8, v10
+; CHECK-NEXT: vmsne.vi v0, v12, 0
+; CHECK-NEXT: vmul.vv v8, v8, v10
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i64>, <vscale x 2 x i1> } @llvm.umul.with.overflow.nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y)
%b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i1> } %a, 0
@@ -367,10 +367,10 @@ define <vscale x 4 x i64> @umulo_nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i6
; CHECK-LABEL: umulo_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmulhu.vv v28, v8, v12
-; CHECK-NEXT: vmsne.vi v0, v28, 0
-; CHECK-NEXT: vmul.vv v28, v8, v12
-; CHECK-NEXT: vmerge.vim v8, v28, 0, v0
+; CHECK-NEXT: vmulhu.vv v16, v8, v12
+; CHECK-NEXT: vmsne.vi v0, v16, 0
+; CHECK-NEXT: vmul.vv v8, v8, v12
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 4 x i64>, <vscale x 4 x i1> } @llvm.umul.with.overflow.nxv4i64(<vscale x 4 x i64> %x, <vscale x 4 x i64> %y)
%b = extractvalue { <vscale x 4 x i64>, <vscale x 4 x i1> } %a, 0