[llvm] [RISCV][GISel] Update rv32 s64 G_SADDE legalizer test. (PR #159153)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 16 11:57:22 PDT 2025


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/159153

We were testing two chained s32 G_SADDEs instead of a single s64 G_SADDE.

The RV64 test had the same problem, using two s64 G_SADDEs instead of a single s128 G_SADDE.
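Condensed from the diff below, the rv32 test body changes roughly as follows (a sketch only; register names like %a_lo are illustrative stand-ins for the test's numbered virtual registers):

    ; before: two chained s32 G_SADDEs
    %lo:_(s32), %c1:_(s1) = G_SADDE %a_lo, %b_lo, %c0
    %hi:_(s32), %c2:_(s1) = G_SADDE %a_hi, %b_hi, %c1

    ; after: one s64 G_SADDE, built from and split back into s32 halves
    %a:_(s64) = G_MERGE_VALUES %a_lo(s32), %a_hi(s32)
    %b:_(s64) = G_MERGE_VALUES %b_lo(s32), %b_hi(s32)
    %sum:_(s64), %c1:_(s1) = G_SADDE %a, %b, %c0
    %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %sum(s64)

The rv64 test gets the equivalent change, with the s64 halves merged into an s128 operand.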

From 6787cf78d62b293f15b24420ffd63831e85f20ba Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 16 Sep 2025 11:47:23 -0700
Subject: [PATCH] [RISCV][GISel] Update rv32 s64 G_SADDE legalizer test.

We were testing two chained s32 G_SADDEs instead of a single s64 G_SADDE.

The RV64 test had the same problem, using two s64 G_SADDEs instead of a single s128 G_SADDE.
---
 .../legalizer/legalize-sadde-rv32.mir         | 71 ++++++++++--------
 .../legalizer/legalize-sadde-rv64.mir         | 74 +++++++++++--------
 2 files changed, 85 insertions(+), 60 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sadde-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sadde-rv32.mir
index 64800fedc9d2a..c67998eb50d4b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sadde-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sadde-rv32.mir
@@ -136,38 +136,49 @@ body:             |
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
     ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY2]]
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
-    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[AND]]
-    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD1]](s32)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ADD1]], [[COPY]]
-    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[ADD1]], [[COPY2]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[XOR]], [[XOR1]]
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[AND1]](s32), [[C1]]
-    ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]]
-    ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[ADD2]], [[ICMP]]
-    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
-    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[ADD3]], [[COPY1]]
-    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[ADD3]], [[COPY3]]
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[XOR2]], [[XOR3]]
-    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[AND2]](s32), [[C1]]
-    ; CHECK-NEXT: $x10 = COPY [[COPY5]](s32)
-    ; CHECK-NEXT: $x11 = COPY [[COPY6]](s32)
-    ; CHECK-NEXT: $x12 = COPY [[ICMP1]](s32)
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[COPY2]]
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY3]]
+    ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[ICMP]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ADD2]](s32)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[DEF]], [[C]]
+    ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[COPY5]], [[AND]]
+    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD3]](s32), [[AND]]
+    ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ADD3]](s32)
+    ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s32) = G_ADD [[COPY6]], [[AND1]]
+    ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s32) = G_ADD [[ADD4]], [[ICMP1]]
+    ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[ADD5]](s32)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY7]], [[COPY]]
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[COPY8]], [[COPY1]]
+    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[COPY7]], [[COPY2]]
+    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[COPY8]], [[COPY3]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[XOR]], [[XOR2]]
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[XOR1]], [[XOR3]]
+    ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[AND2]](s32), [[C]]
+    ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[AND3]](s32), [[C]]
+    ; CHECK-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[AND3]](s32), [[C]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s32), [[ICMP2]], [[ICMP3]]
+    ; CHECK-NEXT: $x10 = COPY [[COPY7]](s32)
+    ; CHECK-NEXT: $x11 = COPY [[COPY8]](s32)
+    ; CHECK-NEXT: $x12 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
     %0:_(s32) = COPY $x10
     %1:_(s32) = COPY $x11
-    %2:_(s32) = COPY $x12
-    %3:_(s32) = COPY $x13
-    %4:_(s32) = COPY $x14
-    %5:_(s1)  = G_TRUNC %4(s32)
-    %6:_(s32), %7:_(s1) = G_SADDE %0, %2, %5
-    %8:_(s32), %9:_(s1) = G_SADDE %1, %3, %7
-    %10:_(s32) = G_ANYEXT %9(s1)
-    $x10 = COPY %6(s32)
-    $x11 = COPY %8(s32)
-    $x12 = COPY %10(s32)
-
+    %2:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
+    %3:_(s32) = COPY $x12
+    %4:_(s32) = COPY $x13
+    %5:_(s64) = G_MERGE_VALUES %3(s32), %4(s32)
+    %6:_(s32) = COPY $x14
+    %7:_(s1)  = G_TRUNC %6(s32)
+    %8:_(s64), %9:_(s1) = G_SADDE %2, %5, %7
+    %10:_(s32), %11:_(s32) = G_UNMERGE_VALUES %8(s64)
+    %12:_(s32) = G_ANYEXT %9(s1)
+    $x10 = COPY %10(s32)
+    $x11 = COPY %11(s32)
+    $x12 = COPY %12(s32)
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 ...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sadde-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sadde-rv64.mir
index db1f50535b526..413aaff9b644b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sadde-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sadde-rv64.mir
@@ -174,38 +174,52 @@ body:             |
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
     ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY2]]
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C]]
-    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ADD]], [[AND]]
-    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY [[ADD1]](s64)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[ADD1]], [[COPY]]
-    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[ADD1]], [[COPY2]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[XOR1]]
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(slt), [[AND1]](s64), [[C1]]
-    ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[COPY3]]
-    ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD [[ADD2]], [[ICMP]]
-    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY [[ADD3]](s64)
-    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s64) = G_XOR [[ADD3]], [[COPY1]]
-    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s64) = G_XOR [[ADD3]], [[COPY3]]
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[XOR2]], [[XOR3]]
-    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(slt), [[AND2]](s64), [[C1]]
-    ; CHECK-NEXT: $x10 = COPY [[COPY5]](s64)
-    ; CHECK-NEXT: $x11 = COPY [[COPY6]](s64)
-    ; CHECK-NEXT: $x12 = COPY [[ICMP1]](s64)
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD]](s64), [[COPY2]]
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY [[ADD]](s64)
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[COPY3]]
+    ; CHECK-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[ADD1]], [[ICMP]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY [[ADD2]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY4]], [[C1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[DEF]], [[C]]
+    ; CHECK-NEXT: [[ADD3:%[0-9]+]]:_(s64) = G_ADD [[COPY5]], [[AND]]
+    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[ADD3]](s64), [[AND]]
+    ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY [[ADD3]](s64)
+    ; CHECK-NEXT: [[ADD4:%[0-9]+]]:_(s64) = G_ADD [[COPY6]], [[AND1]]
+    ; CHECK-NEXT: [[ADD5:%[0-9]+]]:_(s64) = G_ADD [[ADD4]], [[ICMP1]]
+    ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s64) = COPY [[ADD5]](s64)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY7]], [[COPY]]
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[COPY8]], [[COPY1]]
+    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s64) = G_XOR [[COPY7]], [[COPY2]]
+    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s64) = G_XOR [[COPY8]], [[COPY3]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[XOR2]]
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[XOR1]], [[XOR3]]
+    ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[AND2]](s64), [[C]]
+    ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s64) = G_ICMP intpred(slt), [[AND3]](s64), [[C]]
+    ; CHECK-NEXT: [[ICMP4:%[0-9]+]]:_(s64) = G_ICMP intpred(eq), [[AND3]](s64), [[C]]
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP2]](s64)
+    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ICMP3]](s64)
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP4]](s64), [[TRUNC]], [[TRUNC1]]
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
+    ; CHECK-NEXT: $x10 = COPY [[COPY7]](s64)
+    ; CHECK-NEXT: $x11 = COPY [[COPY8]](s64)
+    ; CHECK-NEXT: $x12 = COPY [[ANYEXT]](s64)
     ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
     %0:_(s64) = COPY $x10
     %1:_(s64) = COPY $x11
-    %2:_(s64) = COPY $x12
-    %3:_(s64) = COPY $x13
-    %4:_(s64) = COPY $x14
-    %5:_(s1)  = G_TRUNC %4(s64)
-    %6:_(s64), %7:_(s1) = G_SADDE %0, %2, %5
-    %8:_(s64), %9:_(s1) = G_SADDE %1, %3, %7
-    %10:_(s64) = G_ANYEXT %9(s1)
-    $x10 = COPY %6(s64)
-    $x11 = COPY %8(s64)
-    $x12 = COPY %10(s64)
-
+    %2:_(s128) = G_MERGE_VALUES %0(s64), %1(s64)
+    %3:_(s64) = COPY $x12
+    %4:_(s64) = COPY $x13
+    %5:_(s128) = G_MERGE_VALUES %3(s64), %4(s64)
+    %6:_(s64) = COPY $x14
+    %7:_(s1)  = G_TRUNC %6(s64)
+    %8:_(s128), %9:_(s1) = G_SADDE %2, %5, %7
+    %10:_(s64), %11:_(s64) = G_UNMERGE_VALUES %8(s128)
+    %12:_(s64) = G_ANYEXT %9(s1)
+    $x10 = COPY %10(s64)
+    $x11 = COPY %11(s64)
+    $x12 = COPY %12(s64)
     PseudoRET implicit $x10, implicit $x11, implicit $x12
 ...


