[llvm] [CodeGen] Expand power-of-2 div/rem at IR level in ExpandIRInsts. (PR #180654)
Daniil Fukalov via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 9 17:07:14 PST 2026
https://github.com/dfukalov created https://github.com/llvm/llvm-project/pull/180654
Previously, power-of-2 div/rem operations wider than
MaxLegalDivRemBitWidth were excluded from IR expansion and left for
backend peephole optimizations. Some backends can fail to process such
instructions when the DAGCombiner is disabled.
Now ExpandIRInsts expands them into shift/mask sequences:
- udiv X, 2^C -> lshr X, C
- urem X, 2^C -> and X, (2^C - 1)
- sdiv X, 2^C -> bias adjustment + ashr X, C
- srem X, 2^C -> X - (((X + Bias) >> C) << C)
Special cases handled:
- Division/remainder by 1 or -1 (identity, negation, or zero)
- Exact division (sdiv exact skips bias, produces ashr exact)
- Negative power-of-2 divisors (result is negated)
- INT_MIN divisor (correct via countr_zero on bit pattern)
From edcec54e623b5d0156c5c7e8a931dc0d917d4197 Mon Sep 17 00:00:00 2001
From: Daniil Fukalov <dfukalov at gmail.com>
Date: Mon, 9 Feb 2026 21:28:37 +0100
Subject: [PATCH] [CodeGen] Expand power-of-2 div/rem at IR level in
ExpandIRInsts.
Previously, power-of-2 div/rem operations wider than
MaxLegalDivRemBitWidth were excluded from IR expansion and left for
backend peephole optimizations. Some backends can fail to process such
instructions when the DAGCombiner is disabled.
Now ExpandIRInsts expands them into shift/mask sequences:
- udiv X, 2^C -> lshr X, C
- urem X, 2^C -> and X, (2^C - 1)
- sdiv X, 2^C -> bias adjustment + ashr X, C
- srem X, 2^C -> X - (((X + Bias) >> C) << C)
Special cases handled:
- Division/remainder by 1 or -1 (identity, negation, or zero)
- Exact division (sdiv exact skips bias, produces ashr exact)
- Negative power-of-2 divisors (result is negated)
- INT_MIN divisor (correct via countr_zero on bit pattern)
---
llvm/lib/CodeGen/ExpandIRInsts.cpp | 137 +++++++++++++++-
llvm/test/CodeGen/AMDGPU/div_i128.ll | 67 +++-----
llvm/test/CodeGen/AMDGPU/rem_i128.ll | 61 ++++----
.../Transforms/ExpandIRInsts/X86/sdiv129.ll | 148 ++++++++++++++++++
.../Transforms/ExpandIRInsts/X86/srem129.ll | 115 ++++++++++++++
.../Transforms/ExpandIRInsts/X86/udiv129.ll | 51 ++++++
.../Transforms/ExpandIRInsts/X86/urem129.ll | 25 +++
7 files changed, 521 insertions(+), 83 deletions(-)
diff --git a/llvm/lib/CodeGen/ExpandIRInsts.cpp b/llvm/lib/CodeGen/ExpandIRInsts.cpp
index 07a07872ea86f..ca1a6ad6c73c3 100644
--- a/llvm/lib/CodeGen/ExpandIRInsts.cpp
+++ b/llvm/lib/CodeGen/ExpandIRInsts.cpp
@@ -84,6 +84,115 @@ bool isSigned(unsigned int Opcode) {
return Opcode == Instruction::SDiv || Opcode == Instruction::SRem;
}
+/// For signed div/rem by a power of 2, compute the bias-adjusted dividend:
+/// Sign = X >> (BitWidth - 1) -- all 0s if X >= 0, all 1s if X < 0
+/// Bias = Sign >>> (BitWidth - ShiftAmt) -- 0 if X >= 0, (2^ShiftAmt - 1) if X < 0
+/// Adjusted = X + Bias
+/// This is equivalent to adding (2^ShiftAmt - 1) for negative X, which corrects
+/// rounding towards zero (instead of towards -inf that plain shift would give).
+/// The lshr form is used instead of 'and' to avoid large immediate constants.
+static Value *addSignedBias(IRBuilder<> &Builder, Value *X, unsigned BitWidth,
+ unsigned ShiftAmt) {
+ assert(ShiftAmt > 0 && ShiftAmt < BitWidth &&
+ "ShiftAmt out of range; callers should handle ShiftAmt == 0");
+ Value *Sign = Builder.CreateAShr(X, BitWidth - 1, "sign");
+ Value *Bias = Builder.CreateLShr(Sign, BitWidth - ShiftAmt, "bias");
+ return Builder.CreateAdd(X, Bias, "adjusted");
+}
+
+/// Expand division by a power-of-2 constant.
+/// For unsigned: udiv X, 2^C -> X >> C
+/// For signed: sdiv X, 2^C -> (X + Bias) >> C, where Bias adjusts
+/// for rounding towards zero on negative values.
+/// For exact division: sdiv exact X, 2^C -> ashr exact X, C (no bias needed)
+/// If divisor is a negative power of 2 (signed), the result is negated.
+static void expandPow2Division(BinaryOperator *Div) {
+ bool IsSigned = isSigned(Div->getOpcode());
+ bool IsExact = Div->isExact();
+ Value *X = Div->getOperand(0);
+ auto *C = cast<ConstantInt>(Div->getOperand(1));
+ Type *Ty = Div->getType();
+ unsigned BitWidth = Ty->getIntegerBitWidth();
+
+ APInt DivisorVal = C->getValue();
+ bool IsNegativeDivisor = IsSigned && DivisorVal.isNegative();
+ // Use countr_zero() to get the shift amount directly from the bit pattern.
+ // This works correctly for both positive and negative powers of 2, including
+ // INT_MIN, without needing to negate the value first.
+ unsigned ShiftAmt = DivisorVal.countr_zero();
+
+ IRBuilder<> Builder(Div);
+ Value *Result;
+
+ if (ShiftAmt == 0) {
+ // Division by 1 or -1.
+ // X / 1 = X, X / -1 = -X.
+ Result = IsNegativeDivisor ? Builder.CreateNeg(X) : X;
+ } else if (IsSigned) {
+ // For exact division, no bias is needed since there's no rounding.
+ Value *Dividend =
+ IsExact ? X : addSignedBias(Builder, X, BitWidth, ShiftAmt);
+ Result = Builder.CreateAShr(Dividend, ShiftAmt,
+ IsNegativeDivisor ? "pre.neg" : "", IsExact);
+ if (IsNegativeDivisor)
+ Result = Builder.CreateNeg(Result);
+ } else {
+ // udiv X, 2^C -> lshr X, C
+ Result = Builder.CreateLShr(X, ShiftAmt, "", IsExact);
+ }
+
+ Div->replaceAllUsesWith(Result);
+ // Transfer the name of the original instruction to its replacement,
+ // unless the result is the original dividend itself (div by 1).
+ if (Result != X)
+ if (auto *RI = dyn_cast<Instruction>(Result))
+ RI->takeName(Div);
+ Div->dropAllReferences();
+ Div->eraseFromParent();
+}
+
+/// Expand remainder by a power-of-2 constant.
+/// Let ShiftAmt = log2(|divisor|).
+/// For unsigned: urem X, 2^ShiftAmt -> X & (2^ShiftAmt - 1)
+/// For signed: srem X, 2^ShiftAmt -> X - (((X + Bias) >> ShiftAmt) << ShiftAmt)
+static void expandPow2Remainder(BinaryOperator *Rem) {
+ bool IsSigned = isSigned(Rem->getOpcode());
+ Value *X = Rem->getOperand(0);
+ auto *C = cast<ConstantInt>(Rem->getOperand(1));
+ Type *Ty = Rem->getType();
+ unsigned BitWidth = Ty->getIntegerBitWidth();
+
+ // Use countr_zero() to get the shift amount directly from the bit pattern.
+ // This works for both positive and negative powers of 2, including INT_MIN.
+ unsigned ShiftAmt = C->getValue().countr_zero();
+
+ IRBuilder<> Builder(Rem);
+ Value *Result;
+
+ if (ShiftAmt == 0) {
+ // Remainder by 1 or -1 is always 0.
+ Result = ConstantInt::get(Ty, 0);
+ } else if (IsSigned) {
+ Value *Adjusted = addSignedBias(Builder, X, BitWidth, ShiftAmt);
+ // Clear lower ShiftAmt bits via round-trip shift:
+ // Truncated = (Adjusted >> ShiftAmt) << ShiftAmt
+ Value *Shifted = Builder.CreateAShr(Adjusted, ShiftAmt, "shifted");
+ Value *Truncated = Builder.CreateShl(Shifted, ShiftAmt, "truncated");
+ Result = Builder.CreateSub(X, Truncated);
+ } else {
+ // urem X, 2^ShiftAmt -> X & (2^ShiftAmt - 1)
+ APInt Mask = APInt::getLowBitsSet(BitWidth, ShiftAmt);
+ Value *MaskVal = ConstantInt::get(Ty, Mask);
+ Result = Builder.CreateAnd(X, MaskVal);
+ }
+
+ Rem->replaceAllUsesWith(Result);
+ if (auto *RI = dyn_cast<Instruction>(Result))
+ RI->takeName(Rem);
+ Rem->dropAllReferences();
+ Rem->eraseFromParent();
+}
+
/// This class implements a precise expansion of the frem instruction.
/// The generated code is based on the fmod implementation in the AMD device
/// libs.
@@ -1083,12 +1192,14 @@ static bool runImpl(Function &F, const TargetLowering &TLI,
case Instruction::SDiv:
case Instruction::URem:
case Instruction::SRem:
+ // TODO: We don't consider vectors here.
+ // Power-of-2 divisors are handled inside the expansion (via efficient
+ // shift/mask sequences) rather than being excluded here, so that
+ // backends that cannot lower wide div/rem even for powers of two
+ // (e.g. when DAGCombiner is disabled) still get valid lowered code.
return !DisableExpandLargeDivRem &&
cast<IntegerType>(Ty->getScalarType())->getIntegerBitWidth() >
- MaxLegalDivRemBitWidth
- // The backend has peephole optimizations for powers of two.
- // TODO: We don't consider vectors here.
- && !isConstantPowerOfTwo(I.getOperand(1), isSigned(I.getOpcode()));
+ MaxLegalDivRemBitWidth;
}
return false;
@@ -1134,14 +1245,24 @@ static bool runImpl(Function &F, const TargetLowering &TLI,
break;
case Instruction::UDiv:
- case Instruction::SDiv:
- expandDivision(cast<BinaryOperator>(I));
+ case Instruction::SDiv: {
+ auto *BO = cast<BinaryOperator>(I);
+ if (isConstantPowerOfTwo(BO->getOperand(1), isSigned(BO->getOpcode())))
+ expandPow2Division(BO);
+ else
+ expandDivision(BO);
break;
+ }
case Instruction::URem:
- case Instruction::SRem:
- expandRemainder(cast<BinaryOperator>(I));
+ case Instruction::SRem: {
+ auto *BO = cast<BinaryOperator>(I);
+ if (isConstantPowerOfTwo(BO->getOperand(1), isSigned(BO->getOpcode())))
+ expandPow2Remainder(BO);
+ else
+ expandRemainder(BO);
break;
}
+ }
}
return Modified;
diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll
index 5a4aa4effac00..c3c0ac9c1dbcc 100644
--- a/llvm/test/CodeGen/AMDGPU/div_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll
@@ -4268,19 +4268,21 @@ define i128 @v_sdiv_i128_v_pow2k(i128 %lhs) {
; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v3
; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-O0-NEXT: s_mov_b32 s4, 63
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v5
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v4
+; GFX9-O0-NEXT: v_ashrrev_i64 v[6:7], s4, v[6:7]
+; GFX9-O0-NEXT: s_mov_b32 s5, 31
+; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], s5, v[6:7]
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v4
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
-; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: v_ashrrev_i64 v[4:5], s4, v[4:5]
-; GFX9-O0-NEXT: s_mov_b32 s5, 31
-; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], s5, v[4:5]
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v7
; GFX9-O0-NEXT: s_mov_b64 s[8:9], 0
@@ -4291,12 +4293,12 @@ define i128 @v_sdiv_i128_v_pow2k(i128 %lhs) {
; GFX9-O0-NEXT: v_mov_b32_e32 v4, s6
; GFX9-O0-NEXT: v_addc_co_u32_e32 v5, vcc, v2, v4, vcc
; GFX9-O0-NEXT: v_mov_b32_e32 v2, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v2, vcc
-; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v1, v2, vcc
; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
+; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
; GFX9-O0-NEXT: s_mov_b32 s4, 33
; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 killed $vgpr0_vgpr1 killed $exec
@@ -4377,13 +4379,7 @@ define i128 @v_sdiv_exact_i128_v_pow2k(i128 %lhs) {
; GFX9-LABEL: v_sdiv_exact_i128_v_pow2k:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_ashrrev_i32_e32 v4, 31, v3
-; GFX9-NEXT: v_mov_b32_e32 v5, v4
-; GFX9-NEXT: v_lshrrev_b64 v[4:5], 31, v[4:5]
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v4
-; GFX9-NEXT: v_addc_co_u32_e32 v4, vcc, v1, v5, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
-; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
+; GFX9-NEXT: v_mov_b32_e32 v4, v1
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 31, v[2:3]
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 1, v4
; GFX9-NEXT: v_or_b32_e32 v0, v2, v0
@@ -4394,42 +4390,17 @@ define i128 @v_sdiv_exact_i128_v_pow2k(i128 %lhs) {
; GFX9-O0-LABEL: v_sdiv_exact_i128_v_pow2k:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
-; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
-; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
-; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
-; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: v_ashrrev_i64 v[4:5], s4, v[4:5]
-; GFX9-O0-NEXT: s_mov_b32 s5, 31
-; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], s5, v[4:5]
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v7
-; GFX9-O0-NEXT: s_mov_b64 s[8:9], 0
-; GFX9-O0-NEXT: s_mov_b32 s6, s8
-; GFX9-O0-NEXT: s_mov_b32 s4, s9
-; GFX9-O0-NEXT: v_add_co_u32_e32 v0, vcc, v0, v5
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v4, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, s6
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v5, vcc, v2, v4, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
+; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O0-NEXT: s_mov_b32 s4, 33
; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 killed $vgpr0_vgpr1 killed $exec
+; GFX9-O0-NEXT: s_mov_b32 s5, 31
; GFX9-O0-NEXT: v_lshl_or_b32 v0, v2, s5, v0
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v5
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
diff --git a/llvm/test/CodeGen/AMDGPU/rem_i128.ll b/llvm/test/CodeGen/AMDGPU/rem_i128.ll
index 4e1f0c0538bb5..a937260f6805c 100644
--- a/llvm/test/CodeGen/AMDGPU/rem_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/rem_i128.ll
@@ -2657,49 +2657,56 @@ define i128 @v_srem_i128_v_pow2k(i128 %lhs) {
; GFX9-O0-LABEL: v_srem_i128_v_pow2k:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-O0-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v3
-; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
-; GFX9-O0-NEXT: v_mov_b32_e32 v5, v0
-; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
-; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
-; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
+; GFX9-O0-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-O0-NEXT: ; kill: def $vgpr3 killed $vgpr3 def $vgpr3_vgpr4 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
+; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v0
; GFX9-O0-NEXT: s_mov_b32 s4, 63
-; GFX9-O0-NEXT: v_ashrrev_i64 v[6:7], s4, v[6:7]
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v1
+; GFX9-O0-NEXT: v_ashrrev_i64 v[5:6], s4, v[5:6]
; GFX9-O0-NEXT: s_mov_b32 s4, 31
-; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], s4, v[6:7]
+; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], s4, v[5:6]
+; GFX9-O0-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-O0-NEXT: v_mov_b32_e32 v0, v4
+; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v7
; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
; GFX9-O0-NEXT: s_mov_b32 s5, s6
; GFX9-O0-NEXT: s_mov_b32 s4, s7
; GFX9-O0-NEXT: v_add_co_u32_e32 v6, vcc, v5, v4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v4, vcc, v0, v2, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s5
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v8, vcc, v3, v2, vcc
-; GFX9-O0-NEXT: v_mov_b32_e32 v2, s4
-; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v1, v2, vcc
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v2, vcc, v0, v2, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s5
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v8, vcc, v3, v4, vcc
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, s4
+; GFX9-O0-NEXT: v_addc_co_u32_e32 v4, vcc, v1, v4, vcc
+; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v9, v4
; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v4, v7
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v2, v7
; GFX9-O0-NEXT: s_mov_b32 s6, -2
; GFX9-O0-NEXT: s_mov_b32 s4, 0
; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5
; GFX9-O0-NEXT: s_mov_b32 s5, s6
; GFX9-O0-NEXT: s_mov_b32 s6, s5
-; GFX9-O0-NEXT: v_and_b32_e64 v4, v4, s6
-; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
+; GFX9-O0-NEXT: v_and_b32_e64 v2, v2, s6
+; GFX9-O0-NEXT: v_mov_b32_e32 v4, v6
; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 killed $sgpr4_sgpr5
-; GFX9-O0-NEXT: v_and_b32_e64 v9, v6, s4
-; GFX9-O0-NEXT: ; kill: def $vgpr9 killed $vgpr9 def $vgpr9_vgpr10 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v10, v4
-; GFX9-O0-NEXT: v_mov_b32_e32 v7, v9
-; GFX9-O0-NEXT: v_mov_b32_e32 v6, v10
-; GFX9-O0-NEXT: ; kill: def $vgpr8 killed $vgpr8 def $vgpr8_vgpr9 killed $exec
-; GFX9-O0-NEXT: v_mov_b32_e32 v9, v2
+; GFX9-O0-NEXT: v_and_b32_e64 v10, v4, s4
+; GFX9-O0-NEXT: ; kill: def $vgpr10 killed $vgpr10 def $vgpr10_vgpr11 killed $exec
+; GFX9-O0-NEXT: v_mov_b32_e32 v11, v2
+; GFX9-O0-NEXT: v_mov_b32_e32 v7, v10
+; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v8
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v9
; GFX9-O0-NEXT: v_sub_co_u32_e32 v5, vcc, v5, v7
diff --git a/llvm/test/Transforms/ExpandIRInsts/X86/sdiv129.ll b/llvm/test/Transforms/ExpandIRInsts/X86/sdiv129.ll
index 751bdbade15d9..f8f588310c541 100644
--- a/llvm/test/Transforms/ExpandIRInsts/X86/sdiv129.ll
+++ b/llvm/test/Transforms/ExpandIRInsts/X86/sdiv129.ll
@@ -76,6 +76,154 @@ define void @sdiv129(ptr %ptr, ptr %out) nounwind !prof !0 {
ret void
}
+define void @test_sdiv_pow2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_sdiv_pow2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[SIGN:%.*]] = ashr i129 [[A]], 128
+; CHECK-NEXT: [[BIAS:%.*]] = lshr i129 [[SIGN]], 126
+; CHECK-NEXT: [[ADJUSTED:%.*]] = add i129 [[A]], [[BIAS]]
+; CHECK-NEXT: [[RES:%.*]] = ashr i129 [[ADJUSTED]], 3
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = sdiv i129 %a, 8
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_sdiv_neg_pow2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_sdiv_neg_pow2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[SIGN:%.*]] = ashr i129 [[A]], 128
+; CHECK-NEXT: [[BIAS:%.*]] = lshr i129 [[SIGN]], 126
+; CHECK-NEXT: [[ADJUSTED:%.*]] = add i129 [[A]], [[BIAS]]
+; CHECK-NEXT: [[PRE_NEG:%.*]] = ashr i129 [[ADJUSTED]], 3
+; CHECK-NEXT: [[RES:%.*]] = sub i129 0, [[PRE_NEG]]
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = sdiv i129 %a, -8
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_sdiv_by_1(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_sdiv_by_1(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: store i129 [[A]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = sdiv i129 %a, 1
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_sdiv_by_neg1(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_sdiv_by_neg1(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[RES:%.*]] = sub i129 0, [[A]]
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = sdiv i129 %a, -1
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_sdiv_exact_pow2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_sdiv_exact_pow2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[RES:%.*]] = ashr exact i129 [[A]], 3
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = sdiv exact i129 %a, 8
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_sdiv_exact_neg_pow2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_sdiv_exact_neg_pow2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[PRE_NEG:%.*]] = ashr exact i129 [[A]], 3
+; CHECK-NEXT: [[RES:%.*]] = sub i129 0, [[PRE_NEG]]
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = sdiv exact i129 %a, -8
+ store i129 %res, ptr %out
+ ret void
+}
+
+; INT_MIN = -2^128
+define void @test_sdiv_intmin(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_sdiv_intmin(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[SIGN:%.*]] = ashr i129 [[A]], 128
+; CHECK-NEXT: [[BIAS:%.*]] = lshr i129 [[SIGN]], 1
+; CHECK-NEXT: [[ADJUSTED:%.*]] = add i129 [[A]], [[BIAS]]
+; CHECK-NEXT: [[PRE_NEG:%.*]] = ashr i129 [[ADJUSTED]], 128
+; CHECK-NEXT: [[RES:%.*]] = sub i129 0, [[PRE_NEG]]
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = sdiv i129 %a, -340282366920938463463374607431768211456
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_sdiv_by_2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_sdiv_by_2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[SIGN:%.*]] = ashr i129 [[A]], 128
+; CHECK-NEXT: [[BIAS:%.*]] = lshr i129 [[SIGN]], 128
+; CHECK-NEXT: [[ADJUSTED:%.*]] = add i129 [[A]], [[BIAS]]
+; CHECK-NEXT: [[RES:%.*]] = ashr i129 [[ADJUSTED]], 1
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = sdiv i129 %a, 2
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_sdiv_exact_by_2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_sdiv_exact_by_2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[RES:%.*]] = ashr exact i129 [[A]], 1
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = sdiv exact i129 %a, 2
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_sdiv_large_pow2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_sdiv_large_pow2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[SIGN:%.*]] = ashr i129 [[A]], 128
+; CHECK-NEXT: [[BIAS:%.*]] = lshr i129 [[SIGN]], 65
+; CHECK-NEXT: [[ADJUSTED:%.*]] = add i129 [[A]], [[BIAS]]
+; CHECK-NEXT: [[RES:%.*]] = ashr i129 [[ADJUSTED]], 64
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = sdiv i129 %a, 18446744073709551616
+ store i129 %res, ptr %out
+ ret void
+}
+
!0 = !{!"function_entry_count", i64 1000}
;.
; CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind }
diff --git a/llvm/test/Transforms/ExpandIRInsts/X86/srem129.ll b/llvm/test/Transforms/ExpandIRInsts/X86/srem129.ll
index 45491ccda2b19..84fb376d32724 100644
--- a/llvm/test/Transforms/ExpandIRInsts/X86/srem129.ll
+++ b/llvm/test/Transforms/ExpandIRInsts/X86/srem129.ll
@@ -79,6 +79,121 @@ define void @test(ptr %ptr, ptr %out) nounwind !prof !0 {
ret void
}
+define void @test_srem_pow2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_srem_pow2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[SIGN:%.*]] = ashr i129 [[A]], 128
+; CHECK-NEXT: [[BIAS:%.*]] = lshr i129 [[SIGN]], 126
+; CHECK-NEXT: [[ADJUSTED:%.*]] = add i129 [[A]], [[BIAS]]
+; CHECK-NEXT: [[SHIFTED:%.*]] = ashr i129 [[ADJUSTED]], 3
+; CHECK-NEXT: [[TRUNCATED:%.*]] = shl i129 [[SHIFTED]], 3
+; CHECK-NEXT: [[RES:%.*]] = sub i129 [[A]], [[TRUNCATED]]
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = srem i129 %a, 8
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_srem_neg_pow2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_srem_neg_pow2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[SIGN:%.*]] = ashr i129 [[A]], 128
+; CHECK-NEXT: [[BIAS:%.*]] = lshr i129 [[SIGN]], 126
+; CHECK-NEXT: [[ADJUSTED:%.*]] = add i129 [[A]], [[BIAS]]
+; CHECK-NEXT: [[SHIFTED:%.*]] = ashr i129 [[ADJUSTED]], 3
+; CHECK-NEXT: [[TRUNCATED:%.*]] = shl i129 [[SHIFTED]], 3
+; CHECK-NEXT: [[RES:%.*]] = sub i129 [[A]], [[TRUNCATED]]
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = srem i129 %a, -8
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_srem_by_1(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_srem_by_1(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: store i129 0, ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = srem i129 %a, 1
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_srem_by_neg1(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_srem_by_neg1(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: store i129 0, ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = srem i129 %a, -1
+ store i129 %res, ptr %out
+ ret void
+}
+
+; INT_MIN = -2^128
+define void @test_srem_intmin(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_srem_intmin(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[SIGN:%.*]] = ashr i129 [[A]], 128
+; CHECK-NEXT: [[BIAS:%.*]] = lshr i129 [[SIGN]], 1
+; CHECK-NEXT: [[ADJUSTED:%.*]] = add i129 [[A]], [[BIAS]]
+; CHECK-NEXT: [[SHIFTED:%.*]] = ashr i129 [[ADJUSTED]], 128
+; CHECK-NEXT: [[TRUNCATED:%.*]] = shl i129 [[SHIFTED]], 128
+; CHECK-NEXT: [[RES:%.*]] = sub i129 [[A]], [[TRUNCATED]]
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = srem i129 %a, -340282366920938463463374607431768211456
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_srem_by_2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_srem_by_2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[SIGN:%.*]] = ashr i129 [[A]], 128
+; CHECK-NEXT: [[BIAS:%.*]] = lshr i129 [[SIGN]], 128
+; CHECK-NEXT: [[ADJUSTED:%.*]] = add i129 [[A]], [[BIAS]]
+; CHECK-NEXT: [[SHIFTED:%.*]] = ashr i129 [[ADJUSTED]], 1
+; CHECK-NEXT: [[TRUNCATED:%.*]] = shl i129 [[SHIFTED]], 1
+; CHECK-NEXT: [[RES:%.*]] = sub i129 [[A]], [[TRUNCATED]]
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = srem i129 %a, 2
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_srem_large_pow2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_srem_large_pow2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[SIGN:%.*]] = ashr i129 [[A]], 128
+; CHECK-NEXT: [[BIAS:%.*]] = lshr i129 [[SIGN]], 65
+; CHECK-NEXT: [[ADJUSTED:%.*]] = add i129 [[A]], [[BIAS]]
+; CHECK-NEXT: [[SHIFTED:%.*]] = ashr i129 [[ADJUSTED]], 64
+; CHECK-NEXT: [[TRUNCATED:%.*]] = shl i129 [[SHIFTED]], 64
+; CHECK-NEXT: [[RES:%.*]] = sub i129 [[A]], [[TRUNCATED]]
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = srem i129 %a, 18446744073709551616
+ store i129 %res, ptr %out
+ ret void
+}
+
!0 = !{!"function_entry_count", i64 1000}
;.
; CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind }
diff --git a/llvm/test/Transforms/ExpandIRInsts/X86/udiv129.ll b/llvm/test/Transforms/ExpandIRInsts/X86/udiv129.ll
index 6ad696ae446fd..3f51f0279c585 100644
--- a/llvm/test/Transforms/ExpandIRInsts/X86/udiv129.ll
+++ b/llvm/test/Transforms/ExpandIRInsts/X86/udiv129.ll
@@ -65,6 +65,57 @@ define void @test(ptr %ptr, ptr %out) nounwind !prof !0 {
ret void
}
+define void @test_udiv_pow2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_udiv_pow2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[RES:%.*]] = lshr i129 [[A]], 3
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = udiv i129 %a, 8
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_udiv_by_1(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_udiv_by_1(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: store i129 [[A]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = udiv i129 %a, 1
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_udiv_large_pow2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_udiv_large_pow2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[RES:%.*]] = lshr i129 [[A]], 64
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = udiv i129 %a, 18446744073709551616
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_udiv_exact_pow2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_udiv_exact_pow2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[RES:%.*]] = lshr exact i129 [[A]], 3
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = udiv exact i129 %a, 8
+ store i129 %res, ptr %out
+ ret void
+}
+
!0 = !{!"function_entry_count", i64 1000}
;.
; CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind }
diff --git a/llvm/test/Transforms/ExpandIRInsts/X86/urem129.ll b/llvm/test/Transforms/ExpandIRInsts/X86/urem129.ll
index a4c4ac2cba329..b192cb840f074 100644
--- a/llvm/test/Transforms/ExpandIRInsts/X86/urem129.ll
+++ b/llvm/test/Transforms/ExpandIRInsts/X86/urem129.ll
@@ -69,6 +69,31 @@ define void @test(ptr %ptr, ptr %out) nounwind !prof !0 {
ret void
}
+define void @test_urem_pow2(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_urem_pow2(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: [[RES:%.*]] = and i129 [[A]], 7
+; CHECK-NEXT: store i129 [[RES]], ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = urem i129 %a, 8
+ store i129 %res, ptr %out
+ ret void
+}
+
+define void @test_urem_by_1(ptr %ptr, ptr %out) nounwind {
+; CHECK-LABEL: @test_urem_by_1(
+; CHECK-NEXT: [[A:%.*]] = load i129, ptr [[PTR:%.*]], align 16
+; CHECK-NEXT: store i129 0, ptr [[OUT:%.*]], align 16
+; CHECK-NEXT: ret void
+;
+ %a = load i129, ptr %ptr
+ %res = urem i129 %a, 1
+ store i129 %res, ptr %out
+ ret void
+}
+
!0 = !{!"function_entry_count", i64 1000}
;.
; CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind }
More information about the llvm-commits
mailing list