[llvm] [RISCV] Default to MicroOpBufferSize = 1 for scheduling purposes (PR #126608)

via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 10 14:08:16 PST 2025


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-llvm-globalisel

Author: Philip Reames (preames)

<details>
<summary>Changes</summary>

This change introduces a default schedule model for the RISCV target which leaves everything unchanged except the MicroOpBufferSize.  The default value of this flag in NoSched is 0.  Both configurations represent in order cores (i.e. no reorder window), the difference between them comes down to whether heuristics other than latency are allowed to apply.  (Implementation details below)

I left the processor models which explicitly set MicroOpBufferSize=0 unchanged in this patch, but strongly suspect we should change those too.  Honestly, I think the LLVM wide default for this flag should be changed, but don't have the energy to manage the updates for all targets.

Implementation wise, the effect of this change is that schedule units which are ready to run *except that* one of their predecessors may not have completed yet are added to the Available list, not the Pending one. The result of this is that it becomes possible to choose to schedule a node before its ready cycle if the heuristics prefer.  This is essentially choosing to insert a resource stall instead of e.g. increasing register pressure.

Note that I was initially concerned there might be a correctness aspect (as in some kind of exposed pipeline design), but the generic scheduler doesn't seem to know how to insert noop instructions.  Without that, a program wouldn't be guaranteed to schedule on an exposed pipeline depending on the program and schedule model in question.

The effect of this is that we sometimes prefer reducing register pressure in codegen results.  This is mostly churn (or small wins) on scalar because we have many more registers, but is of major importance on vector - particularly high LMUL - because we effectively have many fewer registers and the relative cost of spilling is much higher. This is a significant improvement on high LMUL code quality for default rva23u configurations - or any non -mcpu vector configuration for that matter.

---

Patch is 4.54 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/126608.diff


402 Files Affected:

- (modified) llvm/lib/Target/RISCV/RISCVProcessors.td (+15-6) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/add-imm.ll (+20-20) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/combine-neg-abs.ll (+96-96) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll (+71-71) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/float-arith.ll (+54-54) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/freeze.ll (+20-20) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/rotl-rotr.ll (+3-3) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/rv64zbkb.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/shifts.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/stacksave-stackrestore.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll (+68-68) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/wide-scalar-shift-by-byte-multiple-legalization.ll (+1563-1609) 
- (modified) llvm/test/CodeGen/RISCV/abds-neg.ll (+346-346) 
- (modified) llvm/test/CodeGen/RISCV/abds.ll (+102-102) 
- (modified) llvm/test/CodeGen/RISCV/abdu-neg.ll (+162-162) 
- (modified) llvm/test/CodeGen/RISCV/abdu.ll (+430-430) 
- (modified) llvm/test/CodeGen/RISCV/add-before-shl.ll (+16-16) 
- (modified) llvm/test/CodeGen/RISCV/add-imm.ll (+16-16) 
- (modified) llvm/test/CodeGen/RISCV/alloca.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/alu64.ll (+4-5) 
- (modified) llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll (+68-68) 
- (modified) llvm/test/CodeGen/RISCV/atomic-rmw.ll (+2400-2440) 
- (modified) llvm/test/CodeGen/RISCV/atomic-signext.ll (+642-648) 
- (modified) llvm/test/CodeGen/RISCV/atomicrmw-cond-sub-clamp.ll (+304-308) 
- (modified) llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll (+313-317) 
- (modified) llvm/test/CodeGen/RISCV/bf16-promote.ll (+10-10) 
- (modified) llvm/test/CodeGen/RISCV/bfloat-convert.ll (+48-48) 
- (modified) llvm/test/CodeGen/RISCV/bfloat-mem.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/bfloat.ll (+20-20) 
- (modified) llvm/test/CodeGen/RISCV/bittest.ll (+371-371) 
- (modified) llvm/test/CodeGen/RISCV/branch-on-zero.ll (+4-6) 
- (modified) llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll (+128-128) 
- (modified) llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll (+100-100) 
- (modified) llvm/test/CodeGen/RISCV/callee-saved-gprs.ll (+284-284) 
- (modified) llvm/test/CodeGen/RISCV/calling-conv-half.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll (+68-72) 
- (modified) llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll (+128-128) 
- (modified) llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll (+16-16) 
- (modified) llvm/test/CodeGen/RISCV/calling-conv-ilp32e.ll (+324-324) 
- (modified) llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll (+17-17) 
- (modified) llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll (+46-46) 
- (modified) llvm/test/CodeGen/RISCV/calling-conv-lp64.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/calling-conv-lp64e.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll (+3-3) 
- (modified) llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32e.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/calls.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/codemodel-lowering.ll (+10-10) 
- (modified) llvm/test/CodeGen/RISCV/condbinops.ll (+5-6) 
- (modified) llvm/test/CodeGen/RISCV/condops.ll (+36-36) 
- (modified) llvm/test/CodeGen/RISCV/copysign-casts.ll (+9-9) 
- (modified) llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/double-calling-conv.ll (+5-5) 
- (modified) llvm/test/CodeGen/RISCV/double-convert.ll (+64-64) 
- (modified) llvm/test/CodeGen/RISCV/double-fcmp-strict.ll (+20-32) 
- (modified) llvm/test/CodeGen/RISCV/double-imm.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/double-mem.ll (+3-3) 
- (modified) llvm/test/CodeGen/RISCV/double-round-conv-sat.ll (+42-42) 
- (modified) llvm/test/CodeGen/RISCV/double-select-fcmp.ll (+10-10) 
- (modified) llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/fastcc-bf16.ll (+3-3) 
- (modified) llvm/test/CodeGen/RISCV/fastcc-float.ll (+3-3) 
- (modified) llvm/test/CodeGen/RISCV/fastcc-half.ll (+3-3) 
- (modified) llvm/test/CodeGen/RISCV/fastcc-without-f-reg.ll (+80-64) 
- (modified) llvm/test/CodeGen/RISCV/float-convert.ll (+18-18) 
- (modified) llvm/test/CodeGen/RISCV/float-fcmp-strict.ll (+13-19) 
- (modified) llvm/test/CodeGen/RISCV/float-select-fcmp.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/fold-addi-loadstore.ll (+22-22) 
- (modified) llvm/test/CodeGen/RISCV/forced-atomics.ll (+146-146) 
- (modified) llvm/test/CodeGen/RISCV/fp-fcanonicalize.ll (+20-20) 
- (modified) llvm/test/CodeGen/RISCV/fp128.ll (+56-56) 
- (modified) llvm/test/CodeGen/RISCV/fpclamptosat.ll (+174-174) 
- (modified) llvm/test/CodeGen/RISCV/get-setcc-result-type.ll (+14-14) 
- (modified) llvm/test/CodeGen/RISCV/half-arith.ll (+16-16) 
- (modified) llvm/test/CodeGen/RISCV/half-convert-strict.ll (+24-24) 
- (modified) llvm/test/CodeGen/RISCV/half-convert.ll (+326-326) 
- (modified) llvm/test/CodeGen/RISCV/half-fcmp-strict.ll (+57-63) 
- (modified) llvm/test/CodeGen/RISCV/half-intrinsics.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/half-mem.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/half-select-fcmp.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/iabs.ll (+68-68) 
- (modified) llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/inline-asm-d-modifier-N.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll (+16-16) 
- (modified) llvm/test/CodeGen/RISCV/inline-asm-f-modifier-N.ll (+16-16) 
- (modified) llvm/test/CodeGen/RISCV/inline-asm-zfinx-constraint-r.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/inline-asm-zhinx-constraint-r.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/inline-asm.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll (+6-6) 
- (modified) llvm/test/CodeGen/RISCV/legalize-fneg.ll (+19-19) 
- (modified) llvm/test/CodeGen/RISCV/llvm.exp10.ll (+20-20) 
- (modified) llvm/test/CodeGen/RISCV/llvm.frexp.ll (+160-166) 
- (modified) llvm/test/CodeGen/RISCV/loop-strength-reduce-add-cheaper-than-mul.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/machine-sink-load-immediate.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/macro-fusion-lui-addi.ll (+2-3) 
- (modified) llvm/test/CodeGen/RISCV/mem.ll (+4-2) 
- (modified) llvm/test/CodeGen/RISCV/mem64.ll (+4-2) 
- (modified) llvm/test/CodeGen/RISCV/memcmp-optsize.ll (+171-171) 
- (modified) llvm/test/CodeGen/RISCV/memcmp.ll (+171-171) 
- (modified) llvm/test/CodeGen/RISCV/memmove.ll (+62-62) 
- (modified) llvm/test/CodeGen/RISCV/memset-pattern.ll (+60-62) 
- (modified) llvm/test/CodeGen/RISCV/mul.ll (+18-18) 
- (modified) llvm/test/CodeGen/RISCV/neg-abs.ll (+26-30) 
- (modified) llvm/test/CodeGen/RISCV/orc-b-patterns.ll (+10-10) 
- (modified) llvm/test/CodeGen/RISCV/overflow-intrinsics.ll (+30-30) 
- (modified) llvm/test/CodeGen/RISCV/pr51206.ll (+10-10) 
- (modified) llvm/test/CodeGen/RISCV/pr58511.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/pr63816.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/pr69586.ll (+550-607) 
- (modified) llvm/test/CodeGen/RISCV/push-pop-popret.ll (+492-492) 
- (modified) llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rotl-rotr.ll (+117-117) 
- (modified) llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rv32zbb.ll (+55-55) 
- (modified) llvm/test/CodeGen/RISCV/rv32zbs.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rv64-double-convert.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rv64-half-convert.ll (+6-6) 
- (modified) llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rv64-trampoline.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rv64zbkb.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll (+10-10) 
- (modified) llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-array.ll (+9-9) 
- (modified) llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/bitreverse-sdnode.ll (+96-96) 
- (modified) llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll (+175-171) 
- (modified) llvm/test/CodeGen/RISCV/rvv/bswap-sdnode.ll (+74-74) 
- (modified) llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll (+202-196) 
- (modified) llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll (+73-109) 
- (modified) llvm/test/CodeGen/RISCV/rvv/calling-conv.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll (+156-149) 
- (modified) llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll (+31-31) 
- (modified) llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll (+9-10) 
- (modified) llvm/test/CodeGen/RISCV/rvv/copyprop.mir (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll (+123-123) 
- (modified) llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll (+30-30) 
- (modified) llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll (+172-164) 
- (modified) llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll (+49-49) 
- (modified) llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll (+33-33) 
- (modified) llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll (+48-48) 
- (modified) llvm/test/CodeGen/RISCV/rvv/expandload.ll (+790-847) 
- (modified) llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll (+18-18) 
- (modified) llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll (+25-25) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll (+59-59) 
- (modified) llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll (+25-25) 
- (modified) llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll (+59-59) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll (+28-35) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast-large-vector.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll (+193-189) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll (+156-152) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll (+19-20) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll (+24-27) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll (+17-16) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll (+84-84) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll (+424-668) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll (+244-256) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll (+467-565) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll (+28-28) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll (+22-22) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll (+36-25) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll (+18-18) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll (+24-24) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll (+24-24) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll (+84-84) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll (+38-46) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll (+24-32) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll (+38-46) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll (+24-32) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll (+31-31) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll (+6-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll (+3-3) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll (+74-66) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll (+30-27) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll (+369-369) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpowi.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll (+24-24) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll (+49-49) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll (+24-24) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll (+49-49) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll (+17-18) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll (+3-3) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll (+500-491) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll (+3-3) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll (+57-60) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll (+62-63) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll (+26-28) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll (+430-432) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll (+52-50) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll (+124-120) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll (+15-15) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll (+238-238) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll (+10-10) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll (+7-7) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll (+318-318) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll (+10-10) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll (+7-7) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll (+98-98) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll (+42-42) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll (+188-234) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll (+37-35) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll (+126-126) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll (+84-84) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll (+84-84) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll (+84-84) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-select-addsub.ll (+3-21) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll (+2024-1818) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll (+19-44) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-changes-length.ll (+23-23) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll (+10-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll (+20-21) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shufflevector-vnsrl.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store-merge-crash.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll (+12-12) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll (+10-10) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll (+45-45) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll (+40-40) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll (+5-5) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll (+15-25) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll (+40-40) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll (+15-25) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll (+15-25) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll (+40-40) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll (+5-5) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll (+5-5) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll (+5-5) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll (+5-5) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll (+81-81) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll (+6-6) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll (+7-7) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll (+46-46) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll (+3-3) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll (+5-5) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll (+5-5) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll (+24-80) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll (+32-125) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll (+156-156) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd-mask.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll (+10-10) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll (+10-10) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll (+4-4) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll (+3-3) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub-mask.ll (+2-2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll (+22-22) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll (+22-22) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/float-round-conv.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/floor-vp.ll (+156-149) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll (+52-64) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll (+77-110) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll (+52-64) 


``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVProcessors.td b/llvm/lib/Target/RISCV/RISCVProcessors.td
index b5eea138732a557..c54afa1e6e72e0c 100644
--- a/llvm/lib/Target/RISCV/RISCVProcessors.td
+++ b/llvm/lib/Target/RISCV/RISCVProcessors.td
@@ -88,21 +88,30 @@ class RISCVTuneProcessorModel<string n,
 
 defvar GenericTuneFeatures = [TuneOptimizedNF2SegmentLoadStore];
 
+// Adjust the default cost model to enable all heuristics, not just latency.
+// In particular, this enables register pressure heuristics which are very
+// important for high LMUL vector code, and have little negative impact
+// on other configurations.
+def GenericModel : SchedMachineModel {
+  let MicroOpBufferSize = 1;
+  let CompleteModel = 0;
+}
+
 def GENERIC_RV32 : RISCVProcessorModel<"generic-rv32",
-                                       NoSchedModel,
+                                       GenericModel,
                                        [Feature32Bit,
                                         FeatureStdExtI],
                                        GenericTuneFeatures>,
                    GenericTuneInfo;
 def GENERIC_RV64 : RISCVProcessorModel<"generic-rv64",
-                                       NoSchedModel,
+                                       GenericModel,
                                        [Feature64Bit,
                                         FeatureStdExtI],
                                        GenericTuneFeatures>,
                    GenericTuneInfo;
 // Support generic for compatibility with other targets. The triple will be used
 // to change to the appropriate rv32/rv64 version.
-def GENERIC : RISCVTuneProcessorModel<"generic", NoSchedModel>, GenericTuneInfo;
+def GENERIC : RISCVTuneProcessorModel<"generic", GenericModel>, GenericTuneInfo;
 
 def MIPS_P8700 : RISCVProcessorModel<"mips-p8700",
                                      MIPSP8700Model,
@@ -496,7 +505,7 @@ def TENSTORRENT_ASCALON_D8 : RISCVProcessorModel<"tt-ascalon-d8",
                                                   TunePostRAScheduler]>;
 
 def VENTANA_VEYRON_V1 : RISCVProcessorModel<"veyron-v1",
-                                            NoSchedModel,
+                                            GenericModel,
                                             [Feature64Bit,
                                              FeatureStdExtI,
                                              FeatureStdExtZifencei,
@@ -556,7 +565,7 @@ def XIANGSHAN_NANHU : RISCVProcessorModel<"xiangshan-nanhu",
                                             TuneShiftedZExtWFusion]>;
 
 def SPACEMIT_X60 : RISCVProcessorModel<"spacemit-x60",
-                                       NoSchedModel,
+                                       GenericModel,
                                        !listconcat(RVA22S64Features,
                                        [FeatureStdExtV,
                                         FeatureStdExtSscofpmf,
@@ -581,7 +590,7 @@ def SPACEMIT_X60 : RISCVProcessorModel<"spacemit-x60",
 }
 
 def RP2350_HAZARD3 : RISCVProcessorModel<"rp2350-hazard3",
-                                         NoSchedModel,
+                                         GenericModel,
                                          [Feature32Bit,
                                           FeatureStdExtI,
                                           FeatureStdExtM,
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/add-imm.ll b/llvm/test/CodeGen/RISCV/GlobalISel/add-imm.ll
index 0fd23a7d346dfd3..1b96189aaea5c7c 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/add-imm.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/add-imm.ll
@@ -212,30 +212,30 @@ define i64 @add64_accept(i64 %a) nounwind {
 define void @add32_reject() nounwind {
 ; RV32I-LABEL: add32_reject:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    lui a0, %hi(ga)
-; RV32I-NEXT:    lui a1, %hi(gb)
-; RV32I-NEXT:    lw a2, %lo(ga)(a0)
-; RV32I-NEXT:    lw a3, %lo(gb)(a1)
-; RV32I-NEXT:    lui a4, 1
-; RV32I-NEXT:    addi a4, a4, -1096
-; RV32I-NEXT:    add a2, a2, a4
-; RV32I-NEXT:    add a3, a3, a4
-; RV32I-NEXT:    sw a2, %lo(ga)(a0)
-; RV32I-NEXT:    sw a3, %lo(gb)(a1)
+; RV32I-NEXT:    lui a0, 1
+; RV32I-NEXT:    lui a1, %hi(ga)
+; RV32I-NEXT:    lui a2, %hi(gb)
+; RV32I-NEXT:    lw a3, %lo(ga)(a1)
+; RV32I-NEXT:    lw a4, %lo(gb)(a2)
+; RV32I-NEXT:    addi a0, a0, -1096
+; RV32I-NEXT:    add a3, a3, a0
+; RV32I-NEXT:    add a0, a4, a0
+; RV32I-NEXT:    sw a3, %lo(ga)(a1)
+; RV32I-NEXT:    sw a0, %lo(gb)(a2)
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add32_reject:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    lui a0, %hi(ga)
-; RV64I-NEXT:    lui a1, %hi(gb)
-; RV64I-NEXT:    lw a2, %lo(ga)(a0)
-; RV64I-NEXT:    lw a3, %lo(gb)(a1)
-; RV64I-NEXT:    lui a4, 1
-; RV64I-NEXT:    addi a4, a4, -1096
-; RV64I-NEXT:    add a2, a2, a4
-; RV64I-NEXT:    add a3, a3, a4
-; RV64I-NEXT:    sw a2, %lo(ga)(a0)
-; RV64I-NEXT:    sw a3, %lo(gb)(a1)
+; RV64I-NEXT:    lui a0, 1
+; RV64I-NEXT:    lui a1, %hi(ga)
+; RV64I-NEXT:    lui a2, %hi(gb)
+; RV64I-NEXT:    lw a3, %lo(ga)(a1)
+; RV64I-NEXT:    lw a4, %lo(gb)(a2)
+; RV64I-NEXT:    addi a0, a0, -1096
+; RV64I-NEXT:    add a3, a3, a0
+; RV64I-NEXT:    add a0, a4, a0
+; RV64I-NEXT:    sw a3, %lo(ga)(a1)
+; RV64I-NEXT:    sw a0, %lo(gb)(a2)
 ; RV64I-NEXT:    ret
   %1 = load i32, ptr @ga, align 4
   %2 = load i32, ptr @gb, align 4
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/combine-neg-abs.ll b/llvm/test/CodeGen/RISCV/GlobalISel/combine-neg-abs.ll
index 3a55189076deeea..5b9f0e60e7d808e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/combine-neg-abs.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/combine-neg-abs.ll
@@ -93,49 +93,49 @@ define i32 @expanded_neg_abs32_unsigned(i32 %x) {
 define i64 @expanded_neg_abs64(i64 %x) {
 ; RV32I-LABEL: expanded_neg_abs64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    snez a2, a0
-; RV32I-NEXT:    neg a3, a1
-; RV32I-NEXT:    sub a2, a3, a2
-; RV32I-NEXT:    neg a3, a0
-; RV32I-NEXT:    beq a2, a1, .LBB2_2
+; RV32I-NEXT:    neg a2, a0
+; RV32I-NEXT:    snez a3, a0
+; RV32I-NEXT:    neg a4, a1
+; RV32I-NEXT:    sub a3, a4, a3
+; RV32I-NEXT:    beq a3, a1, .LBB2_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    slt a4, a1, a2
+; RV32I-NEXT:    slt a4, a1, a3
 ; RV32I-NEXT:    beqz a4, .LBB2_3
 ; RV32I-NEXT:    j .LBB2_4
 ; RV32I-NEXT:  .LBB2_2:
-; RV32I-NEXT:    sltu a4, a0, a3
+; RV32I-NEXT:    sltu a4, a0, a2
 ; RV32I-NEXT:    bnez a4, .LBB2_4
 ; RV32I-NEXT:  .LBB2_3:
-; RV32I-NEXT:    mv a3, a0
-; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:  .LBB2_4:
-; RV32I-NEXT:    neg a0, a3
-; RV32I-NEXT:    snez a1, a3
-; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    neg a0, a2
+; RV32I-NEXT:    snez a1, a2
+; RV32I-NEXT:    neg a2, a3
 ; RV32I-NEXT:    sub a1, a2, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: expanded_neg_abs64:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    snez a2, a0
-; RV32ZBB-NEXT:    neg a3, a1
-; RV32ZBB-NEXT:    sub a2, a3, a2
-; RV32ZBB-NEXT:    neg a3, a0
-; RV32ZBB-NEXT:    beq a2, a1, .LBB2_2
+; RV32ZBB-NEXT:    neg a2, a0
+; RV32ZBB-NEXT:    snez a3, a0
+; RV32ZBB-NEXT:    neg a4, a1
+; RV32ZBB-NEXT:    sub a3, a4, a3
+; RV32ZBB-NEXT:    beq a3, a1, .LBB2_2
 ; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    slt a4, a1, a2
+; RV32ZBB-NEXT:    slt a4, a1, a3
 ; RV32ZBB-NEXT:    beqz a4, .LBB2_3
 ; RV32ZBB-NEXT:    j .LBB2_4
 ; RV32ZBB-NEXT:  .LBB2_2:
-; RV32ZBB-NEXT:    sltu a4, a0, a3
+; RV32ZBB-NEXT:    sltu a4, a0, a2
 ; RV32ZBB-NEXT:    bnez a4, .LBB2_4
 ; RV32ZBB-NEXT:  .LBB2_3:
-; RV32ZBB-NEXT:    mv a3, a0
-; RV32ZBB-NEXT:    mv a2, a1
+; RV32ZBB-NEXT:    mv a2, a0
+; RV32ZBB-NEXT:    mv a3, a1
 ; RV32ZBB-NEXT:  .LBB2_4:
-; RV32ZBB-NEXT:    neg a0, a3
-; RV32ZBB-NEXT:    snez a1, a3
-; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    neg a0, a2
+; RV32ZBB-NEXT:    snez a1, a2
+; RV32ZBB-NEXT:    neg a2, a3
 ; RV32ZBB-NEXT:    sub a1, a2, a1
 ; RV32ZBB-NEXT:    ret
 ;
@@ -163,49 +163,49 @@ define i64 @expanded_neg_abs64(i64 %x) {
 define i64 @expanded_neg_abs64_unsigned(i64 %x) {
 ; RV32I-LABEL: expanded_neg_abs64_unsigned:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    snez a2, a0
-; RV32I-NEXT:    neg a3, a1
-; RV32I-NEXT:    sub a2, a3, a2
-; RV32I-NEXT:    neg a3, a0
-; RV32I-NEXT:    beq a2, a1, .LBB3_2
+; RV32I-NEXT:    neg a2, a0
+; RV32I-NEXT:    snez a3, a0
+; RV32I-NEXT:    neg a4, a1
+; RV32I-NEXT:    sub a3, a4, a3
+; RV32I-NEXT:    beq a3, a1, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    sltu a4, a1, a2
+; RV32I-NEXT:    sltu a4, a1, a3
 ; RV32I-NEXT:    beqz a4, .LBB3_3
 ; RV32I-NEXT:    j .LBB3_4
 ; RV32I-NEXT:  .LBB3_2:
-; RV32I-NEXT:    sltu a4, a0, a3
+; RV32I-NEXT:    sltu a4, a0, a2
 ; RV32I-NEXT:    bnez a4, .LBB3_4
 ; RV32I-NEXT:  .LBB3_3:
-; RV32I-NEXT:    mv a3, a0
-; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:  .LBB3_4:
-; RV32I-NEXT:    neg a0, a3
-; RV32I-NEXT:    snez a1, a3
-; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    neg a0, a2
+; RV32I-NEXT:    snez a1, a2
+; RV32I-NEXT:    neg a2, a3
 ; RV32I-NEXT:    sub a1, a2, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: expanded_neg_abs64_unsigned:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    snez a2, a0
-; RV32ZBB-NEXT:    neg a3, a1
-; RV32ZBB-NEXT:    sub a2, a3, a2
-; RV32ZBB-NEXT:    neg a3, a0
-; RV32ZBB-NEXT:    beq a2, a1, .LBB3_2
+; RV32ZBB-NEXT:    neg a2, a0
+; RV32ZBB-NEXT:    snez a3, a0
+; RV32ZBB-NEXT:    neg a4, a1
+; RV32ZBB-NEXT:    sub a3, a4, a3
+; RV32ZBB-NEXT:    beq a3, a1, .LBB3_2
 ; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    sltu a4, a1, a2
+; RV32ZBB-NEXT:    sltu a4, a1, a3
 ; RV32ZBB-NEXT:    beqz a4, .LBB3_3
 ; RV32ZBB-NEXT:    j .LBB3_4
 ; RV32ZBB-NEXT:  .LBB3_2:
-; RV32ZBB-NEXT:    sltu a4, a0, a3
+; RV32ZBB-NEXT:    sltu a4, a0, a2
 ; RV32ZBB-NEXT:    bnez a4, .LBB3_4
 ; RV32ZBB-NEXT:  .LBB3_3:
-; RV32ZBB-NEXT:    mv a3, a0
-; RV32ZBB-NEXT:    mv a2, a1
+; RV32ZBB-NEXT:    mv a2, a0
+; RV32ZBB-NEXT:    mv a3, a1
 ; RV32ZBB-NEXT:  .LBB3_4:
-; RV32ZBB-NEXT:    neg a0, a3
-; RV32ZBB-NEXT:    snez a1, a3
-; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    neg a0, a2
+; RV32ZBB-NEXT:    snez a1, a2
+; RV32ZBB-NEXT:    neg a2, a3
 ; RV32ZBB-NEXT:    sub a1, a2, a1
 ; RV32ZBB-NEXT:    ret
 ;
@@ -315,49 +315,49 @@ define i32 @expanded_neg_inv_abs32_unsigned(i32 %x) {
 define i64 @expanded_neg_inv_abs64(i64 %x) {
 ; RV32I-LABEL: expanded_neg_inv_abs64:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    snez a2, a0
-; RV32I-NEXT:    neg a3, a1
-; RV32I-NEXT:    sub a2, a3, a2
-; RV32I-NEXT:    neg a3, a0
-; RV32I-NEXT:    beq a2, a1, .LBB6_2
+; RV32I-NEXT:    neg a2, a0
+; RV32I-NEXT:    snez a3, a0
+; RV32I-NEXT:    neg a4, a1
+; RV32I-NEXT:    sub a3, a4, a3
+; RV32I-NEXT:    beq a3, a1, .LBB6_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    slt a4, a2, a1
+; RV32I-NEXT:    slt a4, a3, a1
 ; RV32I-NEXT:    beqz a4, .LBB6_3
 ; RV32I-NEXT:    j .LBB6_4
 ; RV32I-NEXT:  .LBB6_2:
-; RV32I-NEXT:    sltu a4, a3, a0
+; RV32I-NEXT:    sltu a4, a2, a0
 ; RV32I-NEXT:    bnez a4, .LBB6_4
 ; RV32I-NEXT:  .LBB6_3:
-; RV32I-NEXT:    mv a3, a0
-; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:  .LBB6_4:
-; RV32I-NEXT:    neg a0, a3
-; RV32I-NEXT:    snez a1, a3
-; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    neg a0, a2
+; RV32I-NEXT:    snez a1, a2
+; RV32I-NEXT:    neg a2, a3
 ; RV32I-NEXT:    sub a1, a2, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: expanded_neg_inv_abs64:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    snez a2, a0
-; RV32ZBB-NEXT:    neg a3, a1
-; RV32ZBB-NEXT:    sub a2, a3, a2
-; RV32ZBB-NEXT:    neg a3, a0
-; RV32ZBB-NEXT:    beq a2, a1, .LBB6_2
+; RV32ZBB-NEXT:    neg a2, a0
+; RV32ZBB-NEXT:    snez a3, a0
+; RV32ZBB-NEXT:    neg a4, a1
+; RV32ZBB-NEXT:    sub a3, a4, a3
+; RV32ZBB-NEXT:    beq a3, a1, .LBB6_2
 ; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    slt a4, a2, a1
+; RV32ZBB-NEXT:    slt a4, a3, a1
 ; RV32ZBB-NEXT:    beqz a4, .LBB6_3
 ; RV32ZBB-NEXT:    j .LBB6_4
 ; RV32ZBB-NEXT:  .LBB6_2:
-; RV32ZBB-NEXT:    sltu a4, a3, a0
+; RV32ZBB-NEXT:    sltu a4, a2, a0
 ; RV32ZBB-NEXT:    bnez a4, .LBB6_4
 ; RV32ZBB-NEXT:  .LBB6_3:
-; RV32ZBB-NEXT:    mv a3, a0
-; RV32ZBB-NEXT:    mv a2, a1
+; RV32ZBB-NEXT:    mv a2, a0
+; RV32ZBB-NEXT:    mv a3, a1
 ; RV32ZBB-NEXT:  .LBB6_4:
-; RV32ZBB-NEXT:    neg a0, a3
-; RV32ZBB-NEXT:    snez a1, a3
-; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    neg a0, a2
+; RV32ZBB-NEXT:    snez a1, a2
+; RV32ZBB-NEXT:    neg a2, a3
 ; RV32ZBB-NEXT:    sub a1, a2, a1
 ; RV32ZBB-NEXT:    ret
 ;
@@ -385,49 +385,49 @@ define i64 @expanded_neg_inv_abs64(i64 %x) {
 define i64 @expanded_neg_inv_abs64_unsigned(i64 %x) {
 ; RV32I-LABEL: expanded_neg_inv_abs64_unsigned:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    snez a2, a0
-; RV32I-NEXT:    neg a3, a1
-; RV32I-NEXT:    sub a2, a3, a2
-; RV32I-NEXT:    neg a3, a0
-; RV32I-NEXT:    beq a2, a1, .LBB7_2
+; RV32I-NEXT:    neg a2, a0
+; RV32I-NEXT:    snez a3, a0
+; RV32I-NEXT:    neg a4, a1
+; RV32I-NEXT:    sub a3, a4, a3
+; RV32I-NEXT:    beq a3, a1, .LBB7_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    sltu a4, a2, a1
+; RV32I-NEXT:    sltu a4, a3, a1
 ; RV32I-NEXT:    beqz a4, .LBB7_3
 ; RV32I-NEXT:    j .LBB7_4
 ; RV32I-NEXT:  .LBB7_2:
-; RV32I-NEXT:    sltu a4, a3, a0
+; RV32I-NEXT:    sltu a4, a2, a0
 ; RV32I-NEXT:    bnez a4, .LBB7_4
 ; RV32I-NEXT:  .LBB7_3:
-; RV32I-NEXT:    mv a3, a0
-; RV32I-NEXT:    mv a2, a1
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    mv a3, a1
 ; RV32I-NEXT:  .LBB7_4:
-; RV32I-NEXT:    neg a0, a3
-; RV32I-NEXT:    snez a1, a3
-; RV32I-NEXT:    neg a2, a2
+; RV32I-NEXT:    neg a0, a2
+; RV32I-NEXT:    snez a1, a2
+; RV32I-NEXT:    neg a2, a3
 ; RV32I-NEXT:    sub a1, a2, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: expanded_neg_inv_abs64_unsigned:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    snez a2, a0
-; RV32ZBB-NEXT:    neg a3, a1
-; RV32ZBB-NEXT:    sub a2, a3, a2
-; RV32ZBB-NEXT:    neg a3, a0
-; RV32ZBB-NEXT:    beq a2, a1, .LBB7_2
+; RV32ZBB-NEXT:    neg a2, a0
+; RV32ZBB-NEXT:    snez a3, a0
+; RV32ZBB-NEXT:    neg a4, a1
+; RV32ZBB-NEXT:    sub a3, a4, a3
+; RV32ZBB-NEXT:    beq a3, a1, .LBB7_2
 ; RV32ZBB-NEXT:  # %bb.1:
-; RV32ZBB-NEXT:    sltu a4, a2, a1
+; RV32ZBB-NEXT:    sltu a4, a3, a1
 ; RV32ZBB-NEXT:    beqz a4, .LBB7_3
 ; RV32ZBB-NEXT:    j .LBB7_4
 ; RV32ZBB-NEXT:  .LBB7_2:
-; RV32ZBB-NEXT:    sltu a4, a3, a0
+; RV32ZBB-NEXT:    sltu a4, a2, a0
 ; RV32ZBB-NEXT:    bnez a4, .LBB7_4
 ; RV32ZBB-NEXT:  .LBB7_3:
-; RV32ZBB-NEXT:    mv a3, a0
-; RV32ZBB-NEXT:    mv a2, a1
+; RV32ZBB-NEXT:    mv a2, a0
+; RV32ZBB-NEXT:    mv a3, a1
 ; RV32ZBB-NEXT:  .LBB7_4:
-; RV32ZBB-NEXT:    neg a0, a3
-; RV32ZBB-NEXT:    snez a1, a3
-; RV32ZBB-NEXT:    neg a2, a2
+; RV32ZBB-NEXT:    neg a0, a2
+; RV32ZBB-NEXT:    snez a1, a2
+; RV32ZBB-NEXT:    neg a2, a3
 ; RV32ZBB-NEXT:    sub a1, a2, a1
 ; RV32ZBB-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
index cb2037f5fb0271e..28dde9a3472c253 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-arith.ll
@@ -424,11 +424,11 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s2, a2
 ; RV32I-NEXT:    mv s3, a3
 ; RV32I-NEXT:    mv a0, a4
-; RV32I-NEXT:    lui a1, %hi(.LCPI12_0)
-; RV32I-NEXT:    addi a1, a1, %lo(.LCPI12_0)
-; RV32I-NEXT:    lw a2, 0(a1)
-; RV32I-NEXT:    lw a3, 4(a1)
 ; RV32I-NEXT:    mv a1, a5
+; RV32I-NEXT:    lui a2, %hi(.LCPI12_0)
+; RV32I-NEXT:    addi a3, a2, %lo(.LCPI12_0)
+; RV32I-NEXT:    lw a2, 0(a3)
+; RV32I-NEXT:    lw a3, 4(a3)
 ; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv a4, a0
 ; RV32I-NEXT:    lui a5, 524288
@@ -454,9 +454,9 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv s1, a1
-; RV64I-NEXT:    lui a0, %hi(.LCPI12_0)
-; RV64I-NEXT:    ld a1, %lo(.LCPI12_0)(a0)
 ; RV64I-NEXT:    mv a0, a2
+; RV64I-NEXT:    lui a1, %hi(.LCPI12_0)
+; RV64I-NEXT:    ld a1, %lo(.LCPI12_0)(a1)
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
@@ -511,20 +511,20 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    mv s1, a3
 ; RV32I-NEXT:    mv s2, a4
+; RV32I-NEXT:    mv s3, a5
 ; RV32I-NEXT:    lui a2, %hi(.LCPI13_0)
 ; RV32I-NEXT:    addi a2, a2, %lo(.LCPI13_0)
-; RV32I-NEXT:    lw s3, 0(a2)
-; RV32I-NEXT:    lw s4, 4(a2)
-; RV32I-NEXT:    mv s5, a5
-; RV32I-NEXT:    mv a2, s3
-; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    lw s4, 0(a2)
+; RV32I-NEXT:    lw s5, 4(a2)
+; RV32I-NEXT:    mv a2, s4
+; RV32I-NEXT:    mv a3, s5
 ; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv s6, a0
 ; RV32I-NEXT:    mv s7, a1
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    mv a1, s5
-; RV32I-NEXT:    mv a2, s3
-; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a2, s4
+; RV32I-NEXT:    mv a3, s5
 ; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv a4, a0
 ; RV32I-NEXT:    lui a5, 524288
@@ -556,14 +556,14 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
+; RV64I-NEXT:    mv s1, a2
 ; RV64I-NEXT:    lui a1, %hi(.LCPI13_0)
-; RV64I-NEXT:    ld s1, %lo(.LCPI13_0)(a1)
-; RV64I-NEXT:    mv s2, a2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    ld s2, %lo(.LCPI13_0)(a1)
+; RV64I-NEXT:    mv a1, s2
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s3, a0
-; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a1, s2
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a2, a1, 63
@@ -625,20 +625,20 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv a0, a2
 ; RV32I-NEXT:    mv a1, a3
 ; RV32I-NEXT:    mv s2, a4
+; RV32I-NEXT:    mv s3, a5
 ; RV32I-NEXT:    lui a2, %hi(.LCPI14_0)
 ; RV32I-NEXT:    addi a2, a2, %lo(.LCPI14_0)
-; RV32I-NEXT:    lw s3, 0(a2)
-; RV32I-NEXT:    lw s4, 4(a2)
-; RV32I-NEXT:    mv s5, a5
-; RV32I-NEXT:    mv a2, s3
-; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    lw s4, 0(a2)
+; RV32I-NEXT:    lw s5, 4(a2)
+; RV32I-NEXT:    mv a2, s4
+; RV32I-NEXT:    mv a3, s5
 ; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv s6, a0
 ; RV32I-NEXT:    mv s7, a1
 ; RV32I-NEXT:    mv a0, s2
-; RV32I-NEXT:    mv a1, s5
-; RV32I-NEXT:    mv a2, s3
-; RV32I-NEXT:    mv a3, s4
+; RV32I-NEXT:    mv a1, s3
+; RV32I-NEXT:    mv a2, s4
+; RV32I-NEXT:    mv a3, s5
 ; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    mv a4, a0
 ; RV32I-NEXT:    lui a5, 524288
@@ -670,14 +670,14 @@ define double @fnmadd_d_2(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a0
 ; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    mv s1, a2
 ; RV64I-NEXT:    lui a1, %hi(.LCPI14_0)
-; RV64I-NEXT:    ld s1, %lo(.LCPI14_0)(a1)
-; RV64I-NEXT:    mv s2, a2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    ld s2, %lo(.LCPI14_0)(a1)
+; RV64I-NEXT:    mv a1, s2
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    mv s3, a0
-; RV64I-NEXT:    mv a0, s2
-; RV64I-NEXT:    mv a1, s1
+; RV64I-NEXT:    mv a0, s1
+; RV64I-NEXT:    mv a1, s2
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a2, a1, 63
@@ -799,11 +799,11 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ; RV32I-NEXT:    mv s0, a2
 ; RV32I-NEXT:    mv s1, a3
 ; RV32I-NEXT:    mv s2, a4
+; RV32I-NEXT:    mv s3, a5
 ; RV32I-NEXT:    lui a2, %hi(.LCPI17_0)
 ; RV32I-NEXT:    addi a3, a2, %lo(.LCPI17_0)
 ; RV32I-NEXT:    lw a2, 0(a3)
 ; RV32I-NEXT:    lw a3, 4(a3)
-; RV32I-NEXT:    mv s3, a5
 ; RV32I-NEXT:    call __adddf3
 ; RV32I-NEXT:    lui a2, 524288
 ; RV32I-NEXT:    xor a1, a1, a2
@@ -827,9 +827,9 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
 ; RV64I-NEXT:    mv s0, a1
+; RV64I-NEXT:    mv s1, a2
 ; RV64I-NEXT:    lui a1, %hi(.LCPI17_0)
 ; RV64I-NEXT:    ld a1, %lo(.LCPI17_0)(a1)
-; RV64I-NEXT:    mv s1, a2
 ; RV64I-NEXT:    call __adddf3
 ; RV64I-NEXT:    li a1, -1
 ; RV64I-NEXT:    slli a1, a1, 63
@@ -880,1...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/126608


More information about the llvm-commits mailing list