[llvm] [RISCV][GISel] Lower G_SSUBE (PR #157855)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 12 11:03:12 PDT 2025


================
@@ -0,0 +1,171 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -run-pass=legalizer %s -o - | FileCheck %s
+
+---
+name:            ssube_i8
+body:             |
+  bb.1:
+    liveins: $x10, $x11, $x12
+
+    ; CHECK-LABEL: name: ssube_i8
+    ; CHECK: liveins: $x10, $x11, $x12
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
+    ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[ASHR]], [[ASHR1]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[AND]]
+    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[SUB1]], [[C]](s32)
+    ; CHECK-NEXT: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SUB1]](s32), [[ASHR2]]
+    ; CHECK-NEXT: $x10 = COPY [[SUB1]](s32)
+    ; CHECK-NEXT: $x11 = COPY [[ICMP]](s32)
+    ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:_(s32) = COPY $x10
+    %1:_(s8) = G_TRUNC %0(s32)
+    %2:_(s32) = COPY $x11
+    %3:_(s8) = G_TRUNC %2(s32)
+    %4:_(s32) = COPY $x12
+    %5:_(s1) = G_TRUNC %4(s32)
+    %6:_(s8), %7:_(s1) = G_SSUBE %1, %3, %5
+    %8:_(s32) = G_ANYEXT %6(s8)
+    %9:_(s32) = G_ANYEXT %7(s1)
+    $x10 = COPY %8(s32)
+    $x11 = COPY %9(s32)
+    PseudoRET implicit $x10, implicit $x11
+
+...
+---
+name:            ssube_i16
+body:             |
+  bb.1:
+    liveins: $x10, $x11, $x12
+
+    ; CHECK-LABEL: name: ssube_i16
+    ; CHECK: liveins: $x10, $x11, $x12
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+    ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
+    ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[ASHR]], [[ASHR1]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C1]]
+    ; CHECK-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[AND]]
+    ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[SUB1]], [[C]](s32)
+    ; CHECK-NEXT: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C]](s32)
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SUB1]](s32), [[ASHR2]]
+    ; CHECK-NEXT: $x10 = COPY [[SUB1]](s32)
+    ; CHECK-NEXT: $x11 = COPY [[ICMP]](s32)
+    ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:_(s32) = COPY $x10
+    %1:_(s16) = G_TRUNC %0(s32)
+    %2:_(s32) = COPY $x11
+    %3:_(s16) = G_TRUNC %2(s32)
+    %4:_(s32) = COPY $x12
+    %5:_(s1) = G_TRUNC %4(s32)
+    %6:_(s16), %7:_(s1) = G_SSUBE %1, %3, %5
+    %8:_(s32) = G_ANYEXT %6(s16)
+    %9:_(s32) = G_ANYEXT %7(s1)
+    $x10 = COPY %8(s32)
+    $x11 = COPY %9(s32)
+    PseudoRET implicit $x10, implicit $x11
+
+...
+---
+name:            ssube_i32
+body:             |
+  bb.1:
+    liveins: $x10, $x11, $x12
+
+    ; CHECK-LABEL: name: ssube_i32
+    ; CHECK: liveins: $x10, $x11, $x12
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[AND]]
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[ADD]]
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[SUB]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[XOR]], [[XOR1]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[AND1]](s32), [[C1]]
+    ; CHECK-NEXT: $x10 = COPY [[COPY3]](s32)
+    ; CHECK-NEXT: $x11 = COPY [[ICMP]](s32)
+    ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
+    %0:_(s32) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s32) = COPY $x12
+    %3:_(s1) = G_TRUNC %2(s32)
+    %4:_(s32), %5:_(s1) = G_SSUBE %0, %1, %3
+    %6:_(s32) = G_ANYEXT %5(s1)
+    $x10 = COPY %4(s32)
+    $x11 = COPY %6(s32)
+    PseudoRET implicit $x10, implicit $x11
+
+...
+---
+name:            ssube_i64
+body:             |
+  bb.1:
+    liveins: $x10, $x11, $x12, $x13, $x14
+
+    ; CHECK-LABEL: name: ssube_i64
+    ; CHECK: liveins: $x10, $x11, $x12, $x13, $x14
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C]]
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[AND]]
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[ADD]]
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[COPY2]]
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[SUB]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[XOR]], [[XOR1]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[AND1]](s32), [[C1]]
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY3]], [[ICMP]]
+    ; CHECK-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[ADD1]]
+    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[SUB1]](s32)
+    ; CHECK-NEXT: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[COPY1]], [[COPY3]]
+    ; CHECK-NEXT: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[COPY1]], [[SUB1]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[XOR2]], [[XOR3]]
+    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[AND2]](s32), [[C1]]
+    ; CHECK-NEXT: $x10 = COPY [[COPY5]](s32)
+    ; CHECK-NEXT: $x11 = COPY [[COPY6]](s32)
+    ; CHECK-NEXT: $x12 = COPY [[ICMP1]](s32)
+    ; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11, implicit $x12
+    %0:_(s32) = COPY $x10
+    %1:_(s32) = COPY $x11
+    %2:_(s32) = COPY $x12
+    %3:_(s32) = COPY $x13
+    %4:_(s32) = COPY $x14
+    %5:_(s1)  = G_TRUNC %4(s32)
+    %6:_(s32), %7:_(s1) = G_SSUBE %0, %2, %5
+    %8:_(s32), %9:_(s1) = G_SSUBE %1, %3, %7
----------------
topperc wrote:

The overflow result of a G_SSUBE isn't really a proper carry value to feed into another G_SSUBE, is it?

An s64 G_SSUBE on RV32 should be split into a G_USUBO for the lower half and a G_SSUBE for the upper half, I think. But that splitting should be done by the legalizer.
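
A minimal hand-written sketch of what I mean, assuming the goal is to model an s64 subtract with no incoming borrow (register/value numbering here is illustrative, not taken from the patch):

```
    %0:_(s32) = COPY $x10                      ; low half of LHS
    %1:_(s32) = COPY $x11                      ; high half of LHS
    %2:_(s32) = COPY $x12                      ; low half of RHS
    %3:_(s32) = COPY $x13                      ; high half of RHS
    %4:_(s32), %5:_(s1) = G_USUBO %0, %2       ; low half: produces a real borrow
    %6:_(s32), %7:_(s1) = G_SSUBE %1, %3, %5   ; high half: consumes that borrow,
                                               ; reports signed overflow of the wide result
    $x10 = COPY %4(s32)
    $x11 = COPY %6(s32)
    %8:_(s32) = G_ANYEXT %7(s1)
    $x12 = COPY %8(s32)
    PseudoRET implicit $x10, implicit $x11, implicit $x12
```

The point is that the value fed into the high-half G_SSUBE is an actual borrow bit, whereas in the test as written it is the signed-overflow flag of the previous G_SSUBE.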

https://github.com/llvm/llvm-project/pull/157855

