[llvm] [AArch64][GISel] Add legalizer support for @llvm.umul.with.overflow.i128 (PR #170101)

via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 2 20:05:51 PST 2025


https://github.com/ayank227 updated https://github.com/llvm/llvm-project/pull/170101

From 6c7539fc3811382ef1a34d6c8dcbc1d5c5af2bd1 Mon Sep 17 00:00:00 2001
From: Ayan Kundu <ayank at nvidia.com>
Date: Fri, 28 Nov 2025 08:47:54 +0000
Subject: [PATCH] [AArch64][GISel] Add legalizer support for
 @llvm.umul.with.overflow.i128

This follows the same pattern SelectionDAG uses to expand this intrinsic.
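
Each 128-bit operand is split into 64-bit halves, so that

  LHS * RHS = LHSLo*RHSLo
            + (LHSHi*RHSLo + LHSLo*RHSHi) * 2^64
            + LHSHi*RHSHi * 2^128

The product overflows 128 bits iff both high halves are non-zero (the
2^128 term is non-zero), either cross product exceeds 64 bits, or adding
the cross products into the high half of LHSLo*RHSLo carries out.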
---
 .../llvm/CodeGen/GlobalISel/LegalizerHelper.h |   1 +
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    |  88 +++++++++++++
 .../AArch64/GlobalISel/arm64-fallback.ll      |  13 --
 .../CodeGen/AArch64/i128_with_overflow.ll     | 119 ++++++++++++------
 4 files changed, 171 insertions(+), 50 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index a458cbd94ccb1..5cbbf719c5504 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -426,6 +426,7 @@ class LegalizerHelper {
   LLVM_ABI LegalizeResult narrowScalarAddSub(MachineInstr &MI, unsigned TypeIdx,
                                              LLT NarrowTy);
   LLVM_ABI LegalizeResult narrowScalarMul(MachineInstr &MI, LLT Ty);
+  LLVM_ABI LegalizeResult narrowScalarMULO(MachineInstr &MI, LLT Ty);
   LLVM_ABI LegalizeResult narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx,
                                             LLT Ty);
   LLVM_ABI LegalizeResult narrowScalarExtract(MachineInstr &MI,
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 1aa1d465d8da6..216b72909840f 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -1669,6 +1669,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
   case TargetOpcode::G_MUL:
   case TargetOpcode::G_UMULH:
     return narrowScalarMul(MI, NarrowTy);
+  case TargetOpcode::G_UMULO:
+    return narrowScalarMULO(MI, NarrowTy);
   case TargetOpcode::G_EXTRACT:
     return narrowScalarExtract(MI, TypeIdx, NarrowTy);
   case TargetOpcode::G_INSERT:
@@ -7202,6 +7204,92 @@ LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) {
   return Legalized;
 }
 
+// Narrow unsigned multiplication with overflow (G_UMULO).
+LegalizerHelper::LegalizeResult
+LegalizerHelper::narrowScalarMULO(MachineInstr &MI, LLT NarrowTy) {
+  auto [DstReg, OverflowReg, Src1, Src2] = MI.getFirst4Regs();
+
+  LLT Ty = MRI.getType(DstReg);
+  if (Ty.isVector())
+    return UnableToLegalize;
+
+  unsigned Size = Ty.getSizeInBits();
+  unsigned NarrowSize = NarrowTy.getSizeInBits();
+  if (Size % NarrowSize != 0)
+    return UnableToLegalize;
+
+  unsigned NumParts = Size / NarrowSize;
+  if (NumParts != 2)
+    return UnableToLegalize; // Only handle halving, e.g. s128 -> 2 x s64
+
+  // Split inputs into high/low parts
+  SmallVector<Register, 2> Src1Parts, Src2Parts;
+  extractParts(Src1, NarrowTy, NumParts, Src1Parts, MIRBuilder, MRI);
+  extractParts(Src2, NarrowTy, NumParts, Src2Parts, MIRBuilder, MRI);
+
+  Register LHSLo = Src1Parts[0];
+  Register LHSHi = Src1Parts[1];
+  Register RHSLo = Src2Parts[0];
+  Register RHSHi = Src2Parts[1];
+
+  // If both high parts are non-zero, the LHSHi * RHSHi term contributes at
+  // least 2^Size, so overflow is guaranteed.
+  auto Zero = MIRBuilder.buildConstant(NarrowTy, 0);
+  auto LHSHiNZ =
+      MIRBuilder.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), LHSHi, Zero);
+  auto RHSHiNZ =
+      MIRBuilder.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), RHSHi, Zero);
+  auto BothHiNonZero = MIRBuilder.buildAnd(LLT::scalar(1), LHSHiNZ, RHSHiNZ);
+
+  // Cross product LHSHi * RHSLo (as MUL+UMULH directly); a non-zero high
+  // half means the full product cannot fit in Size bits.
+  auto Mid1 = MIRBuilder.buildMul(NarrowTy, LHSHi, RHSLo);
+  auto Mid1Hi = MIRBuilder.buildUMulH(NarrowTy, LHSHi, RHSLo);
+  auto Ovf1 =
+      MIRBuilder.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Mid1Hi, Zero);
+
+  // Cross product LHSLo * RHSHi, checked the same way.
+  auto Mid2 = MIRBuilder.buildMul(NarrowTy, LHSLo, RHSHi);
+  auto Mid2Hi = MIRBuilder.buildUMulH(NarrowTy, LHSLo, RHSHi);
+  auto Ovf2 =
+      MIRBuilder.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Mid2Hi, Zero);
+
+  // Add the cross products (HighSum = Mid1 + Mid2). This add cannot wrap
+  // undetected: if both high halves are non-zero, overflow is already
+  // flagged via BothHiNonZero; otherwise at least one of Mid1/Mid2 is zero.
+  auto HighSum = MIRBuilder.buildAdd(NarrowTy, Mid1, Mid2);
+
+  // Multiply the zero-extended low parts to get their full Size-bit product.
+  LLT WideTy = LLT::scalar(Size);
+  auto LHSLoExt = MIRBuilder.buildZExt(WideTy, LHSLo);
+  auto RHSLoExt = MIRBuilder.buildZExt(WideTy, RHSLo);
+  auto FullMul = MIRBuilder.buildMul(WideTy, LHSLoExt, RHSLoExt).getReg(0);
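+  // Note: the wide G_MUL built here is itself illegal and is expected to be
+  // narrowed again by the legalizer (narrowScalarMul), yielding a
+  // MUL + UMULH pair on NarrowTy.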
+
+  SmallVector<Register, 2> LowMulParts;
+  extractParts(FullMul, NarrowTy, NumParts, LowMulParts, MIRBuilder, MRI);
+  Register ResLo = LowMulParts[0];
+  Register ResHi = LowMulParts[1];
+
+  // Add HighSum to ResHi with overflow detection
+  auto AddHighSum =
+      MIRBuilder.buildUAddo(NarrowTy, LLT::scalar(1), ResHi, HighSum);
+  Register FinalHi = AddHighSum.getReg(0);
+  Register Ovf3 = AddHighSum.getReg(1);
+
+  // Combine all overflow flags
+  // overflow = BothHiNonZero || Ovf1 || Ovf2 || Ovf3
+  auto Ovf12 = MIRBuilder.buildOr(LLT::scalar(1), Ovf1, Ovf2);
+  auto Ovf123 = MIRBuilder.buildOr(LLT::scalar(1), Ovf12, Ovf3);
+  auto FinalOvf = MIRBuilder.buildOr(LLT::scalar(1), BothHiNonZero, Ovf123);
+
+  // Merge the low and high halves into the final result.
+  SmallVector<Register, 2> ResultParts = {ResLo, FinalHi};
+  MIRBuilder.buildMergeLikeInstr(DstReg, ResultParts);
+
+  // Copy the combined s1 overflow flag into the destination register.
+  MIRBuilder.buildCopy(OverflowReg, FinalOvf);
+
+  MI.eraseFromParent();
+  return Legalized;
+}
+
 LegalizerHelper::LegalizeResult
 LegalizerHelper::narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx,
                                    LLT NarrowTy) {
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index f8cd868a4c755..94469cf262e3e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -101,19 +101,6 @@ entry:
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %4:_(s128), %5:_(s1) = G_UMULO %0:_, %6:_ (in function: umul_s128)
-; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for umul_s128
-; FALLBACK-WITH-REPORT-OUT-LABEL: umul_s128
-declare {i128, i1} @llvm.umul.with.overflow.i128(i128, i128) nounwind readnone
-define zeroext i1 @umul_s128(i128 %v1, ptr %res) {
-entry:
-  %t = call {i128, i1} @llvm.umul.with.overflow.i128(i128 %v1, i128 2)
-  %val = extractvalue {i128, i1} %t, 0
-  %obit = extractvalue {i128, i1} %t, 1
-  store i128 %val, ptr %res
-  ret i1 %obit
-}
-
 ; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: {{.*}}llvm.experimental.gc.statepoint{{.*}} (in function: gc_intr)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for gc_intr
 ; FALLBACK-WITH-REPORT-OUT-LABEL: gc_intr
diff --git a/llvm/test/CodeGen/AArch64/i128_with_overflow.ll b/llvm/test/CodeGen/AArch64/i128_with_overflow.ll
index 3d90e094a5747..472ac0dbcacce 100644
--- a/llvm/test/CodeGen/AArch64/i128_with_overflow.ll
+++ b/llvm/test/CodeGen/AArch64/i128_with_overflow.ll
@@ -2,8 +2,7 @@
 ; RUN: llc -mtriple=aarch64 -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -o - %s 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:       warning: Instruction selection used fallback path for test_umul_i128
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for test_smul_i128
+; CHECK-GI:  warning: Instruction selection used fallback path for test_smul_i128
 
 define i128 @test_uadd_i128(i128 noundef %x, i128 noundef %y) {
 ; CHECK-SD-LABEL: test_uadd_i128:
@@ -222,41 +221,87 @@ cleanup:
 }
 
 define i128 @test_umul_i128(i128 noundef %x, i128 noundef %y) {
-; CHECK-LABEL: test_umul_i128:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    orr x8, x1, x3
-; CHECK-NEXT:    cbz x8, .LBB4_2
-; CHECK-NEXT:  // %bb.1: // %overflow
-; CHECK-NEXT:    mul x9, x3, x0
-; CHECK-NEXT:    cmp x1, #0
-; CHECK-NEXT:    ccmp x3, #0, #4, ne
-; CHECK-NEXT:    umulh x10, x1, x2
-; CHECK-NEXT:    umulh x8, x3, x0
-; CHECK-NEXT:    madd x9, x1, x2, x9
-; CHECK-NEXT:    ccmp xzr, x10, #0, eq
-; CHECK-NEXT:    umulh x11, x0, x2
-; CHECK-NEXT:    ccmp xzr, x8, #0, eq
-; CHECK-NEXT:    mul x0, x0, x2
-; CHECK-NEXT:    cset w8, ne
-; CHECK-NEXT:    adds x1, x11, x9
-; CHECK-NEXT:    csinc w8, w8, wzr, lo
-; CHECK-NEXT:    cbnz w8, .LBB4_3
-; CHECK-NEXT:    b .LBB4_4
-; CHECK-NEXT:  .LBB4_2: // %overflow.no
-; CHECK-NEXT:    umulh x1, x0, x2
-; CHECK-NEXT:    mul x0, x0, x2
-; CHECK-NEXT:    cbz w8, .LBB4_4
-; CHECK-NEXT:  .LBB4_3: // %if.then
-; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    bl error
-; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
-; CHECK-NEXT:    sxtw x0, w0
-; CHECK-NEXT:    asr x1, x0, #63
-; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:  .LBB4_4: // %cleanup
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_umul_i128:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    orr x8, x1, x3
+; CHECK-SD-NEXT:    cbz x8, .LBB4_2
+; CHECK-SD-NEXT:  // %bb.1: // %overflow
+; CHECK-SD-NEXT:    mul x9, x3, x0
+; CHECK-SD-NEXT:    cmp x1, #0
+; CHECK-SD-NEXT:    ccmp x3, #0, #4, ne
+; CHECK-SD-NEXT:    umulh x10, x1, x2
+; CHECK-SD-NEXT:    umulh x8, x3, x0
+; CHECK-SD-NEXT:    madd x9, x1, x2, x9
+; CHECK-SD-NEXT:    ccmp xzr, x10, #0, eq
+; CHECK-SD-NEXT:    umulh x11, x0, x2
+; CHECK-SD-NEXT:    ccmp xzr, x8, #0, eq
+; CHECK-SD-NEXT:    mul x0, x0, x2
+; CHECK-SD-NEXT:    cset w8, ne
+; CHECK-SD-NEXT:    adds x1, x11, x9
+; CHECK-SD-NEXT:    csinc w8, w8, wzr, lo
+; CHECK-SD-NEXT:    cbnz w8, .LBB4_3
+; CHECK-SD-NEXT:    b .LBB4_4
+; CHECK-SD-NEXT:  .LBB4_2: // %overflow.no
+; CHECK-SD-NEXT:    umulh x1, x0, x2
+; CHECK-SD-NEXT:    mul x0, x0, x2
+; CHECK-SD-NEXT:    cbz w8, .LBB4_4
+; CHECK-SD-NEXT:  .LBB4_3: // %if.then
+; CHECK-SD-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SD-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT:    .cfi_offset w30, -16
+; CHECK-SD-NEXT:    bl error
+; CHECK-SD-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT:    sxtw x0, w0
+; CHECK-SD-NEXT:    asr x1, x0, #63
+; CHECK-SD-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-SD-NEXT:  .LBB4_4: // %cleanup
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_umul_i128:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    orr x8, x1, x3
+; CHECK-GI-NEXT:    cbz x8, .LBB4_2
+; CHECK-GI-NEXT:  // %bb.1: // %overflow
+; CHECK-GI-NEXT:    umulh x8, x1, x2
+; CHECK-GI-NEXT:    cmp x1, #0
+; CHECK-GI-NEXT:    cset w12, ne
+; CHECK-GI-NEXT:    cmp x3, #0
+; CHECK-GI-NEXT:    mul x9, x0, x3
+; CHECK-GI-NEXT:    cset w13, ne
+; CHECK-GI-NEXT:    and w12, w12, w13
+; CHECK-GI-NEXT:    umulh x10, x0, x3
+; CHECK-GI-NEXT:    cmp x8, #0
+; CHECK-GI-NEXT:    madd x9, x1, x2, x9
+; CHECK-GI-NEXT:    cset w8, ne
+; CHECK-GI-NEXT:    umulh x11, x0, x2
+; CHECK-GI-NEXT:    cmp x10, #0
+; CHECK-GI-NEXT:    mul x0, x0, x2
+; CHECK-GI-NEXT:    cset w10, ne
+; CHECK-GI-NEXT:    orr w8, w8, w10
+; CHECK-GI-NEXT:    orr w8, w12, w8
+; CHECK-GI-NEXT:    adds x1, x11, x9
+; CHECK-GI-NEXT:    cset w9, hs
+; CHECK-GI-NEXT:    orr w8, w8, w9
+; CHECK-GI-NEXT:    tbnz w8, #0, .LBB4_3
+; CHECK-GI-NEXT:    b .LBB4_4
+; CHECK-GI-NEXT:  .LBB4_2: // %overflow.no
+; CHECK-GI-NEXT:    mov x8, x0
+; CHECK-GI-NEXT:    mul x0, x0, x2
+; CHECK-GI-NEXT:    umulh x1, x8, x2
+; CHECK-GI-NEXT:    mov w8, #0 // =0x0
+; CHECK-GI-NEXT:    tbz w8, #0, .LBB4_4
+; CHECK-GI-NEXT:  .LBB4_3: // %if.then
+; CHECK-GI-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT:    .cfi_offset w30, -16
+; CHECK-GI-NEXT:    bl error
+; CHECK-GI-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT:    asr w1, w0, #31
+; CHECK-GI-NEXT:    bfi x0, x1, #32, #32
+; CHECK-GI-NEXT:    bfi x1, x1, #32, #32
+; CHECK-GI-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT:  .LBB4_4: // %cleanup
+; CHECK-GI-NEXT:    ret
 entry:
   %0 = tail call { i128, i1 } @llvm.umul.with.overflow.i128(i128 %x, i128 %y)
   %1 = extractvalue { i128, i1 } %0, 1


