[llvm] [RISCV] Replace undef with poison, NFC (PR #157396)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 8 00:27:49 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Jianjian Guan (jacquesguan)
<details>
<summary>Changes</summary>
Since `undef` is now deprecated, reusing some of these test cases would cause CI failures, so this PR replaces most occurrences of `undef` with `poison`.
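For illustration, this is a minimal sketch of the mechanical substitution applied throughout the tests, mirroring the `vec-ret.ll` changes in the diff below (the function names `@ret_before`/`@ret_after` are illustrative, not from the patch):

```llvm
; Before: the test returned an undef scalable vector.
define <vscale x 1 x i8> @ret_before() {
entry:
  ret <vscale x 1 x i8> undef
}

; After: the same test returns poison, the preferred placeholder for
; "don't care" values now that undef is deprecated.
define <vscale x 1 x i8> @ret_after() {
entry:
  ret <vscale x 1 x i8> poison
}
```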
---
Patch is 7.19 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/157396.diff
236 Files Affected:
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll (+25-25)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-bf16-err.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-f16-err.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll (+50-50)
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/vararg.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/double-calling-conv.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/65704-illegal-instruction.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/allone-masked-to-unmasked.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/commutable.ll (+94-94)
- (modified) llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compress-fp.ll (+16-16)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-compress-int.ll (+16-16)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector-shuffle.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll (+46-46)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll (+61-61)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-fp.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll (+31-31)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll (+17-17)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave2.ll (+7-7)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-extract-subvector.ll (+7-7)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-fp-interleave.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-fp.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-int-interleave.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-int.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-rotate.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1down.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll (+23-23)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-negative.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store.ll (+27-27)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/frm-insert.ll (+18-18)
- (modified) llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/interleave-crash.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/masked-load-fp.ll (+21-21)
- (modified) llvm/test/CodeGen/RISCV/rvv/masked-load-int-e64.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/masked-load-int.ll (+19-19)
- (modified) llvm/test/CodeGen/RISCV/rvv/masked-tama.ll (+57-57)
- (modified) llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll (+10-10)
- (modified) llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/narrow-shift-extend.ll (+10-10)
- (modified) llvm/test/CodeGen/RISCV/rvv/pr106109.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-masked-vops.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll (+24-24)
- (modified) llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_x_f_qf.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/rvv/sf_vfnrclip_xu_f_qf.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/undef-vp-ops.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll (+20-20)
- (modified) llvm/test/CodeGen/RISCV/rvv/vaadd.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vaaddu.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vadc.ll (+66-66)
- (modified) llvm/test/CodeGen/RISCV/rvv/vadd.ll (+68-68)
- (modified) llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/vand.ll (+66-66)
- (modified) llvm/test/CodeGen/RISCV/rvv/vandn.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/variant-cc.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/vasub.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vasubu.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vbrev.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/vbrev8.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/vclmul.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/vclmulh.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/vclz.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/vcpopv.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/vctz.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/vdiv.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vdivu.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vector-compress.ll (+43-43)
- (modified) llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll (+18-18)
- (modified) llvm/test/CodeGen/RISCV/rvv/vector-reassociations.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/vector-tuple-align.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfadd.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfclass.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfcvt-f-x.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfcvt-f-xu.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-x-f.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfcvt-rtz-xu-f.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfcvt-x-f.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfcvt-xu-f.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfdiv.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmax.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmerge.ll (+45-45)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmin.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmul.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmv.v.f.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfncvtbf16-f-f.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfrdiv.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfrec7.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfrsqrt7.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfrsub.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfsgnj.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfsgnjn.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfsgnjx.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfslide1down.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfslide1up.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfsqrt.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfsub.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfwadd.ll (+18-18)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfwadd.w.ll (+26-26)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-x.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-xu.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-x-f.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfwcvt-rtz-xu-f.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfwcvt-x-f.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfwcvt-xu-f.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfwcvtbf16-f-f.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfwmul.ll (+18-18)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfwsub.ll (+18-18)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfwsub.w.ll (+26-26)
- (modified) llvm/test/CodeGen/RISCV/rvv/vid.ll (+21-21)
- (modified) llvm/test/CodeGen/RISCV/rvv/viota.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll (+13-13)
- (modified) llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/vle.ll (+43-43)
- (modified) llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/vleff.ll (+46-46)
- (modified) llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll (+32-32)
- (modified) llvm/test/CodeGen/RISCV/rvv/vloxei.ll (+111-111)
- (modified) llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll (+988-988)
- (modified) llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll (+1304-1304)
- (modified) llvm/test/CodeGen/RISCV/rvv/vlse.ll (+43-43)
- (modified) llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll (+330-330)
- (modified) llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll (+337-337)
- (modified) llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll (+165-165)
- (modified) llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll (+165-165)
- (modified) llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll (+330-330)
- (modified) llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll (+330-330)
- (modified) llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll (+32-32)
- (modified) llvm/test/CodeGen/RISCV/rvv/vluxei.ll (+111-111)
- (modified) llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll (+988-988)
- (modified) llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll (+1311-1311)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmax.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmaxu.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmerge.ll (+87-87)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmin.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vminu.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmul.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmulh.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmulhsu.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmulhu.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmv.v.v-peephole.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmv.v.x.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vnclip.ll (+45-45)
- (modified) llvm/test/CodeGen/RISCV/rvv/vnclipu.ll (+45-45)
- (modified) llvm/test/CodeGen/RISCV/rvv/vnsra.ll (+45-45)
- (modified) llvm/test/CodeGen/RISCV/rvv/vnsrl.ll (+45-45)
- (modified) llvm/test/CodeGen/RISCV/rvv/vor.ll (+66-66)
- (modified) llvm/test/CodeGen/RISCV/rvv/vrem.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vremu.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vrev8.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/vrgather.ll (+129-129)
- (modified) llvm/test/CodeGen/RISCV/rvv/vrgatherei16.ll (+36-36)
- (modified) llvm/test/CodeGen/RISCV/rvv/vrol.ll (+66-66)
- (modified) llvm/test/CodeGen/RISCV/rvv/vror.ll (+66-66)
- (modified) llvm/test/CodeGen/RISCV/rvv/vrsub.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsadd.ll (+66-66)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsaddu.ll (+66-66)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsbc.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-O0.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll (+63-63)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll (+18-18)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsext.ll (+28-28)
- (modified) llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/vslide1down.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll (+9-9)
- (modified) llvm/test/CodeGen/RISCV/rvv/vslide1up.ll (+22-22)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsll.ll (+67-67)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsm3me.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsm4k.ll (+3-3)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsmul.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsra.ll (+66-66)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsrl.ll (+66-66)
- (modified) llvm/test/CodeGen/RISCV/rvv/vssub.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vssubu.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsub.ll (+66-66)
- (modified) llvm/test/CodeGen/RISCV/rvv/vwadd.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vwadd.w.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vwaddu.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vwaddu.w.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll (+72-72)
- (modified) llvm/test/CodeGen/RISCV/rvv/vwmul.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vwmulsu.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vwmulu.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vwsll.ll (+45-45)
- (modified) llvm/test/CodeGen/RISCV/rvv/vwsub.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vwsub.w.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vwsubu.ll (+30-30)
- (modified) llvm/test/CodeGen/RISCV/rvv/vwsubu.w.ll (+44-44)
- (modified) llvm/test/CodeGen/RISCV/rvv/vxor.ll (+66-66)
- (modified) llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll (+37-37)
- (modified) llvm/test/CodeGen/RISCV/rvv/vzext.ll (+28-28)
- (modified) llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll (+5-5)
- (modified) llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/vararg.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/xcvmem.ll (+10-10)
``````````diff
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll
index 1cd43f4f89b00..5870ca08be9b5 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/ret.ll
@@ -111,7 +111,7 @@ define ptr @test_ret_ptr() {
; RV64I-NEXT: $x10 = COPY [[DEF]](p0)
; RV64I-NEXT: PseudoRET implicit $x10
entry:
- ret ptr undef
+ ret ptr poison
}
define [2 x i32] @test_ret_2xi32() {
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll
index 89c7bfe81d5f9..3ce62e7909f39 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/shufflevector.ll
@@ -44,7 +44,7 @@ define <vscale x 1 x i1> @shufflevector_nxv1i1_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
- %a = shufflevector <vscale x 1 x i1> undef, <vscale x 1 x i1> undef, <vscale x 1 x i32> undef
+ %a = shufflevector <vscale x 1 x i1> poison, <vscale x 1 x i1> poison, <vscale x 1 x i32> poison
ret <vscale x 1 x i1> %a
}
@@ -114,7 +114,7 @@ define <vscale x 2 x i1> @shufflevector_nxv2i1_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
- %a = shufflevector <vscale x 2 x i1> undef, <vscale x 2 x i1> undef, <vscale x 2 x i32> undef
+ %a = shufflevector <vscale x 2 x i1> poison, <vscale x 2 x i1> poison, <vscale x 2 x i32> poison
ret <vscale x 2 x i1> %a
}
@@ -184,7 +184,7 @@ define <vscale x 4 x i1> @shufflevector_nxv4i1_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
- %a = shufflevector <vscale x 4 x i1> undef, <vscale x 4 x i1> undef, <vscale x 4 x i32> undef
+ %a = shufflevector <vscale x 4 x i1> poison, <vscale x 4 x i1> poison, <vscale x 4 x i32> poison
ret <vscale x 4 x i1> %a
}
@@ -254,7 +254,7 @@ define <vscale x 8 x i1> @shufflevector_nxv8i1_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
- %a = shufflevector <vscale x 8 x i1> undef, <vscale x 8 x i1> undef, <vscale x 8 x i32> undef
+ %a = shufflevector <vscale x 8 x i1> poison, <vscale x 8 x i1> poison, <vscale x 8 x i32> poison
ret <vscale x 8 x i1> %a
}
@@ -324,7 +324,7 @@ define <vscale x 16 x i1> @shufflevector_nxv16i1_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[EVEC]](s1)
; RV64-NEXT: $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
; RV64-NEXT: PseudoRET implicit $v0
- %a = shufflevector <vscale x 16 x i1> undef, <vscale x 16 x i1> undef, <vscale x 16 x i32> undef
+ %a = shufflevector <vscale x 16 x i1> poison, <vscale x 16 x i1> poison, <vscale x 16 x i32> poison
ret <vscale x 16 x i1> %a
}
@@ -394,7 +394,7 @@ define <vscale x 1 x i8> @shufflevector_nxv1i8_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
- %a = shufflevector <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i32> undef
+ %a = shufflevector <vscale x 1 x i8> poison, <vscale x 1 x i8> poison, <vscale x 1 x i32> poison
ret <vscale x 1 x i8> %a
}
@@ -464,7 +464,7 @@ define <vscale x 2 x i8> @shufflevector_nxv2i8_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
- %a = shufflevector <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i32> undef
+ %a = shufflevector <vscale x 2 x i8> poison, <vscale x 2 x i8> poison, <vscale x 2 x i32> poison
ret <vscale x 2 x i8> %a
}
@@ -534,7 +534,7 @@ define <vscale x 4 x i8> @shufflevector_nxv4i8_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
- %a = shufflevector <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i32> undef
+ %a = shufflevector <vscale x 4 x i8> poison, <vscale x 4 x i8> poison, <vscale x 4 x i32> poison
ret <vscale x 4 x i8> %a
}
@@ -604,7 +604,7 @@ define <vscale x 8 x i8> @shufflevector_nxv8i8_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
- %a = shufflevector <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i32> undef
+ %a = shufflevector <vscale x 8 x i8> poison, <vscale x 8 x i8> poison, <vscale x 8 x i32> poison
ret <vscale x 8 x i8> %a
}
@@ -674,7 +674,7 @@ define <vscale x 16 x i8> @shufflevector_nxv16i8_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[EVEC]](s8)
; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
; RV64-NEXT: PseudoRET implicit $v8m2
- %a = shufflevector <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i32> undef
+ %a = shufflevector <vscale x 16 x i8> poison, <vscale x 16 x i8> poison, <vscale x 16 x i32> poison
ret <vscale x 16 x i8> %a
}
@@ -744,7 +744,7 @@ define <vscale x 1 x i16> @shufflevector_nxv1i16_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
- %a = shufflevector <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i32> undef
+ %a = shufflevector <vscale x 1 x i16> poison, <vscale x 1 x i16> poison, <vscale x 1 x i32> poison
ret <vscale x 1 x i16> %a
}
@@ -814,7 +814,7 @@ define <vscale x 2 x i16> @shufflevector_nxv2i16_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
- %a = shufflevector <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i32> undef
+ %a = shufflevector <vscale x 2 x i16> poison, <vscale x 2 x i16> poison, <vscale x 2 x i32> poison
ret <vscale x 2 x i16> %a
}
@@ -884,7 +884,7 @@ define <vscale x 4 x i16> @shufflevector_nxv4i16_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
- %a = shufflevector <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i32> undef
+ %a = shufflevector <vscale x 4 x i16> poison, <vscale x 4 x i16> poison, <vscale x 4 x i32> poison
ret <vscale x 4 x i16> %a
}
@@ -954,7 +954,7 @@ define <vscale x 8 x i16> @shufflevector_nxv8i16_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
; RV64-NEXT: PseudoRET implicit $v8m2
- %a = shufflevector <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i32> undef
+ %a = shufflevector <vscale x 8 x i16> poison, <vscale x 8 x i16> poison, <vscale x 8 x i32> poison
ret <vscale x 8 x i16> %a
}
@@ -1024,7 +1024,7 @@ define <vscale x 16 x i16> @shufflevector_nxv16i16_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[EVEC]](s16)
; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
; RV64-NEXT: PseudoRET implicit $v8m4
- %a = shufflevector <vscale x 16 x i16> undef, <vscale x 16 x i16> undef, <vscale x 16 x i32> undef
+ %a = shufflevector <vscale x 16 x i16> poison, <vscale x 16 x i16> poison, <vscale x 16 x i32> poison
ret <vscale x 16 x i16> %a
}
@@ -1094,7 +1094,7 @@ define <vscale x 1 x i32> @shufflevector_nxv1i32_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
; RV64-NEXT: PseudoRET implicit $v8
- %a = shufflevector <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef
+ %a = shufflevector <vscale x 1 x i32> poison, <vscale x 1 x i32> poison, <vscale x 1 x i32> poison
ret <vscale x 1 x i32> %a
}
@@ -1164,7 +1164,7 @@ define <vscale x 2 x i32> @shufflevector_nxv2i32_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
; RV64-NEXT: PseudoRET implicit $v8
- %a = shufflevector <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef
+ %a = shufflevector <vscale x 2 x i32> poison, <vscale x 2 x i32> poison, <vscale x 2 x i32> poison
ret <vscale x 2 x i32> %a
}
@@ -1234,7 +1234,7 @@ define <vscale x 4 x i32> @shufflevector_nxv4i32_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m2
- %a = shufflevector <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef
+ %a = shufflevector <vscale x 4 x i32> poison, <vscale x 4 x i32> poison, <vscale x 4 x i32> poison
ret <vscale x 4 x i32> %a
}
@@ -1304,7 +1304,7 @@ define <vscale x 8 x i32> @shufflevector_nxv8i32_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m4
- %a = shufflevector <vscale x 8 x i32> undef, <vscale x 8 x i32> undef, <vscale x 8 x i32> undef
+ %a = shufflevector <vscale x 8 x i32> poison, <vscale x 8 x i32> poison, <vscale x 8 x i32> poison
ret <vscale x 8 x i32> %a
}
@@ -1374,7 +1374,7 @@ define <vscale x 16 x i32> @shufflevector_nxv16i32_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[EVEC]](s32)
; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
; RV64-NEXT: PseudoRET implicit $v8m8
- %a = shufflevector <vscale x 16 x i32> undef, <vscale x 16 x i32> undef, <vscale x 16 x i32> undef
+ %a = shufflevector <vscale x 16 x i32> poison, <vscale x 16 x i32> poison, <vscale x 16 x i32> poison
ret <vscale x 16 x i32> %a
}
@@ -1444,7 +1444,7 @@ define <vscale x 1 x i64> @shufflevector_nxv1i64_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
; RV64-NEXT: $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
; RV64-NEXT: PseudoRET implicit $v8
- %a = shufflevector <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i32> undef
+ %a = shufflevector <vscale x 1 x i64> poison, <vscale x 1 x i64> poison, <vscale x 1 x i32> poison
ret <vscale x 1 x i64> %a
}
@@ -1514,7 +1514,7 @@ define <vscale x 2 x i64> @shufflevector_nxv2i64_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
; RV64-NEXT: $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m2
- %a = shufflevector <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i32> undef
+ %a = shufflevector <vscale x 2 x i64> poison, <vscale x 2 x i64> poison, <vscale x 2 x i32> poison
ret <vscale x 2 x i64> %a
}
@@ -1584,7 +1584,7 @@ define <vscale x 4 x i64> @shufflevector_nxv4i64_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
; RV64-NEXT: $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m4
- %a = shufflevector <vscale x 4 x i64> undef, <vscale x 4 x i64> undef, <vscale x 4 x i32> undef
+ %a = shufflevector <vscale x 4 x i64> poison, <vscale x 4 x i64> poison, <vscale x 4 x i32> poison
ret <vscale x 4 x i64> %a
}
@@ -1654,7 +1654,7 @@ define <vscale x 8 x i64> @shufflevector_nxv8i64_1() {
; RV64-NEXT: [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[EVEC]](s64)
; RV64-NEXT: $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m8
- %a = shufflevector <vscale x 8 x i64> undef, <vscale x 8 x i64> undef, <vscale x 8 x i32> undef
+ %a = shufflevector <vscale x 8 x i64> poison, <vscale x 8 x i64> poison, <vscale x 8 x i32> poison
ret <vscale x 8 x i64> %a
}
@@ -1732,7 +1732,7 @@ define <vscale x 16 x i64> @shufflevector_nxv16i64_1() {
; RV64-NEXT: $v8m8 = COPY [[UV]](<vscale x 8 x s64>)
; RV64-NEXT: $v16m8 = COPY [[UV1]](<vscale x 8 x s64>)
; RV64-NEXT: PseudoRET implicit $v8m8, implicit $v16m8
- %a = shufflevector <vscale x 16 x i64> undef, <vscale x 16 x i64> undef, <vscale x 16 x i32> undef
+ %a = shufflevector <vscale x 16 x i64> poison, <vscale x 16 x i64> poison, <vscale x 16 x i32> poison
ret <vscale x 16 x i64> %a
}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
index e985d1fc03864..74961d12c1c85 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vararg.ll
@@ -642,7 +642,7 @@ define void @va1_caller() nounwind {
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; LP64D-NEXT: PseudoRET
- %1 = call i32 (ptr, ...) @va1(ptr undef, double 1.0, i32 2)
+ %1 = call i32 (ptr, ...) @va1(ptr poison, double 1.0, i32 2)
ret void
}
@@ -932,7 +932,7 @@ define void @va2_caller() nounwind {
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: PseudoRET
- %1 = call i64 (ptr, ...) @va2(ptr undef, double 1.000000e+00)
+ %1 = call i64 (ptr, ...) @va2(ptr poison, double 1.000000e+00)
ret void
}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-bf16-err.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-bf16-err.ll
index c968d0726317f..73cfed184c425 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-bf16-err.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-bf16-err.ll
@@ -8,7 +8,7 @@
; then it will try to scalarize the argument to an s32, which may drop elements.
define <vscale x 1 x bfloat> @test_ret_nxv1bf16() {
entry:
- ret <vscale x 1 x bfloat> undef
+ ret <vscale x 1 x bfloat> poison
}
; CHECK: LLVM ERROR: unable to translate instruction: ret (in function: test_ret_nxv1bf16)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-f16-err.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-f16-err.ll
index f87ca94ceb4f1..a775b00d54757 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-f16-err.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret-f16-err.ll
@@ -8,7 +8,7 @@
; then it will try to scalarize the argument to an s32, which may drop elements.
define <vscale x 1 x half> @test_ret_nxv1f16() {
entry:
- ret <vscale x 1 x half> undef
+ ret <vscale x 1 x half> poison
}
; CHECK: LLVM ERROR: unable to translate instruction: ret (in function: test_ret_nxv1f16)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
index 8e56942309ae8..4b1359e85bc59 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
@@ -21,7 +21,7 @@ define <vscale x 1 x i8> @test_ret_nxv1i8() {
; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
entry:
- ret <vscale x 1 x i8> undef
+ ret <vscale x 1 x i8> poison
}
define <vscale x 2 x i8> @test_ret_nxv2i8() {
@@ -37,7 +37,7 @@ define <vscale x 2 x i8> @test_ret_nxv2i8() {
; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
entry:
- ret <vscale x 2 x i8> undef
+ ret <vscale x 2 x i8> poison
}
define <vscale x 4 x i8> @test_ret_nxv4i8() {
@@ -53,7 +53,7 @@ define <vscale x 4 x i8> @test_ret_nxv4i8() {
; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
entry:
- ret <vscale x 4 x i8> undef
+ ret <vscale x 4 x i8> poison
}
define <vscale x 8 x i8> @test_ret_nxv8i8() {
@@ -69,7 +69,7 @@ define <vscale x 8 x i8> @test_ret_nxv8i8() {
; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 8 x s8>)
; RV64-NEXT: PseudoRET implicit $v8
entry:
- ret <vscale x 8 x i8> undef
+ ret <vscale x 8 x i8> poison
}
define <vscale x 16 x i8> @test_ret_nxv16i8() {
@@ -85,7 +85,7 @@ define <vscale x 16 x i8> @test_ret_nxv16i8() {
; RV64-NEXT: $v8m2 = COPY [[DEF]](<vscale x 16 x s8>)
; RV64-NEXT: PseudoRET implicit $v8m2
entry:
- ret <vscale x 16 x i8> undef
+ ret <vscale x 16 x i8> poison
}
define <vscale x 32 x i8> @test_ret_nxv32i8() {
@@ -101,7 +101,7 @@ define <vscale x 32 x i8> @test_ret_nxv32i8() {
; RV64-NEXT: $v8m4 = COPY [[DEF]](<vscale x 32 x s8>)
; RV64-NEXT: PseudoRET implicit $v8m4
entry:
- ret <vscale x 32 x i8> undef
+ ret <vscale x 32 x i8> poison
}
define <vscale x 64 x i8> @test_ret_nxv64i8() {
@@ -117,7 +117,7 @@ define <vscale x 64 x i8> @test_ret_nxv64i8() {
; RV64-NEXT: $v8m8 = COPY [[DEF]](<vscale x 64 x s8>)
; RV64-NEXT: PseudoRET implicit $v8m8
entry:
- ret <vscale x 64 x i8> undef
+ ret <vscale x 64 x i8> poison
}
define <vscale x 1 x i16> @test_ret_nxv1i16() {
@@ -133,7 +133,7 @@ define <vscale x 1 x i16> @test_ret_nxv1i16() {
; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
entry:
- ret <vscale x 1 x i16> undef
+ ret <vscale x 1 x i16> poison
}
define <vscale x 2 x i16> @test_ret_nxv2i16() {
@@ -149,7 +149,7 @@ define <vscale x 2 x i16> @test_ret_nxv2i16() {
; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 2 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
entry:
- ret <vscale x 2 x i16> undef
+ ret <vscale x 2 x i16> poison
}
define <vscale x 4 x i16> @test_ret_nxv4i16() {
@@ -165,7 +165,7 @@ define <vscale x 4 x i16> @test_ret_nxv4i16() {
; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 4 x s16>)
; RV64-NEXT: PseudoRET implicit $v8
entry:
- ret <vscale x 4 x i16> undef
+ ret <vscale x 4 x i16> poison
}
define <vscale x 8 x i16> @test_ret_nxv8i16() {
@@ -181,7 +181,7 @@ define <vscale x 8 x i16> @test_ret_nxv8i16() {
; RV64-NEXT: $v8m2 = COPY [[DEF]](<vscale x 8 x s16>)
; RV64-NEXT: PseudoRET implicit $v8m2
entry:
- ret <vscale x 8 x i16> undef
+ ret <vscale x 8 x i16> poison
}
define <vscale x 16 x i16> @test_ret_nxv16i16() {
@@ -197,7 +197,7 @@ define <vscale x 16 x i16> @test_ret_nxv16i16() {
; RV64-NEXT: $v8m4 = COPY [[DEF]](<vscale x 16 x s16>)
; RV64-NEXT: PseudoRET implicit $v8m4
entry:
- ret <vscale x 16 x i16> undef
+ ret <vscale x 16 x i16> poison
}
define <vscale x 32 x i16> @test_ret_nxv32i16() {
@@ -213,7 +213,7 @@ define <vscale x 32 x i16> @test_ret_nxv32i16() {
; RV64-NEXT: $v8m8 = COPY [[DEF]](<vscale x 32 x s16>)
; RV64-NEXT: PseudoRET implicit $v8m8
entry:
- ret <vscale x 32 x i16> undef
+ ret <vscale x 32 x i16> poison
}
define <vscale x 1 x i32> @test_ret_nxv1i32() {
@@ -229,7 +229,7 @@ define <vscale x 1 x i32> @test_ret_nxv1i32() {
; RV64-NEXT: $v8 = COPY [[DEF]](<vscale x 1 x s32>)
; RV64-NEXT: PseudoRET implicit $v8
entry:
- ret <vscale x 1 x i32> undef
+ ret <vscale x 1 x i32> poison
}
define <vscale x 2 x i32> @test_ret_nxv2i32() {
@@ -245,7 +245,7 @@ define <vscale x 2 x i32> @test_ret_nxv2i32() {
; RV64-NEXT: $v...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/157396