[llvm] [RISCV] Make InitUndef handle undef operand (PR #65755)

Piyou Chen via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 8 06:22:11 PDT 2023


https://github.com/BeMg created https://github.com/llvm/llvm-project/pull/65755:

https://github.com/llvm/llvm-project/issues/65704.

---

When an operand is marked undef directly, rather than being defined by an IMPLICIT_DEF, the InitUndef pass misses it (see the simplified sketch below).

This patch

1. supports the undef operand case, and
2. merges the code for `handleImplicitDef` and `fixupUndefOperandOnly` to reduce the searching logic.
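
To illustrate the first point, here is a minimal MIR-style sketch of the missed pattern (instruction names and operand lists are simplified and not taken from the actual test case): an early-clobber vector instruction whose source operand carries the undef flag directly, with no IMPLICIT_DEF feeding it, so the existing handleImplicitDef walk never reaches it.

  ; Missed before this patch: the use is flagged undef, but there is no
  ; IMPLICIT_DEF def for InitUndef to rewrite.
  early-clobber %0:vr = PseudoVRGATHER_VV_M1 undef %1:vr, %2:vr, ...

  ; With this patch, the undef use is rewritten to a fresh init value:
  %3:vr = PseudoRVVInitUndefM1
  early-clobber %0:vr = PseudoVRGATHER_VV_M1 %3:vr, %2:vr, ...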




From 91553088a8799bc28433f85152059fe420d66fea Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Fri, 8 Sep 2023 01:34:08 -0700
Subject: [PATCH 1/3] [RISCV][NFC] precommit for 65704

---
 .../RISCV/65704-illegal-instruction.ll        | 807 ++++++++++++++++++
 1 file changed, 807 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll

diff --git a/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll b/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
new file mode 100644
index 000000000000000..036a55a741a2ee0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
@@ -0,0 +1,807 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+m,+zfh,+zvfh \
+; RUN:  < %s | FileCheck %s
+
+%struct.png_row_info_struct = type { i32, i64, i8, i8, i8, i8 }
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
+declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(read)
+declare <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8>, ptr nocapture, i64) #2
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8>, i64 immarg) #3
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(argmem: readwrite)
+declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8>, <16 x i8>, i64 immarg) #3
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(none)
+declare <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, i64, i64, i64 immarg)
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(none)
+declare <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, i64, i64, i64 immarg)
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(read)
+declare <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1.i64(ptr nocapture, i64) #2
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none)
+declare <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32>, <4 x i32>, i64 immarg) #3
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(write)
+declare void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32>, ptr nocapture, <vscale x 2 x i1>, i64) #4
+
+; Function Attrs: nofree noinline nosync nounwind uwtable vscale_range(2,2)
+define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, ptr nocapture noundef readonly %2) local_unnamed_addr #0 {
+; CHECK-LABEL: foo:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -144
+; CHECK-NEXT:    .cfi_def_cfa_offset 144
+; CHECK-NEXT:    sd ra, 136(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 128(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 120(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 112(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s3, 104(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s4, 96(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s5, 88(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s6, 80(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s7, 72(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s8, 64(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s9, 56(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s10, 48(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s11, 40(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -8
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    .cfi_offset s1, -24
+; CHECK-NEXT:    .cfi_offset s2, -32
+; CHECK-NEXT:    .cfi_offset s3, -40
+; CHECK-NEXT:    .cfi_offset s4, -48
+; CHECK-NEXT:    .cfi_offset s5, -56
+; CHECK-NEXT:    .cfi_offset s6, -64
+; CHECK-NEXT:    .cfi_offset s7, -72
+; CHECK-NEXT:    .cfi_offset s8, -80
+; CHECK-NEXT:    .cfi_offset s9, -88
+; CHECK-NEXT:    .cfi_offset s10, -96
+; CHECK-NEXT:    .cfi_offset s11, -104
+; CHECK-NEXT:    csrr a3, vlenb
+; CHECK-NEXT:    li a4, 6
+; CHECK-NEXT:    mul a3, a3, a4
+; CHECK-NEXT:    sub sp, sp, a3
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 144 + 6 * vlenb
+; CHECK-NEXT:    ld s8, 8(a0)
+; CHECK-NEXT:    blez s8, .LBB0_4
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv s2, a2
+; CHECK-NEXT:    mv s3, a1
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vslidedown.vi v10, v8, 1
+; CHECK-NEXT:    vslideup.vi v10, v9, 7
+; CHECK-NEXT:    addi a0, sp, 32
+; CHECK-NEXT:    vs1r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s s0, v10
+; CHECK-NEXT:    vslidedown.vi v9, v10, 1
+; CHECK-NEXT:    vmv.x.s s1, v9
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vle8.v v9, (a2)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 2
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    vmv.x.s s4, v9
+; CHECK-NEXT:    vslidedown.vi v8, v9, 1
+; CHECK-NEXT:    vmv.x.s s5, v8
+; CHECK-NEXT:    mv a2, s4
+; CHECK-NEXT:    mv a3, s5
+; CHECK-NEXT:    mv a4, a0
+; CHECK-NEXT:    mv a5, a1
+; CHECK-NEXT:    call bar at plt
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v9, 3
+; CHECK-NEXT:    vslideup.vi v9, v8, 5
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 1
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 32
+; CHECK-NEXT:    vs1r.v v9, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a3, a2, 2
+; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 32
+; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
+; CHECK-NEXT:    vslideup.vi v8, v9, 5
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s s6, v8
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s s7, v8
+; CHECK-NEXT:    mv a2, s6
+; CHECK-NEXT:    mv a3, s7
+; CHECK-NEXT:    mv a4, s4
+; CHECK-NEXT:    mv a5, s5
+; CHECK-NEXT:    call bar at plt
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 2
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 32
+; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT:    vslidedown.vi v9, v8, 6
+; CHECK-NEXT:    vslideup.vi v9, v8, 2
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 2
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 32
+; CHECK-NEXT:    vs1r.v v9, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a3, a2, 2
+; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 32
+; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT:    vslidedown.vi v8, v8, 6
+; CHECK-NEXT:    vslideup.vi v8, v9, 2
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s s4, v8
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s s5, v8
+; CHECK-NEXT:    mv a2, s4
+; CHECK-NEXT:    mv a3, s5
+; CHECK-NEXT:    mv a4, s6
+; CHECK-NEXT:    mv a5, s7
+; CHECK-NEXT:    call bar at plt
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    addi a2, s3, 12
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a2)
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a3, a2, 2
+; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 32
+; CHECK-NEXT:    vs1r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT:    mv a2, s0
+; CHECK-NEXT:    mv a3, s1
+; CHECK-NEXT:    mv a4, s4
+; CHECK-NEXT:    mv a5, s5
+; CHECK-NEXT:    call bar at plt
+; CHECK-NEXT:    addi a2, sp, 32
+; CHECK-NEXT:    vl1r.v v9, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    li s6, 1
+; CHECK-NEXT:    sd s6, 16(sp)
+; CHECK-NEXT:    addi s7, sp, 16
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT:    vlm.v v0, (s7)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vl1r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vse32.v v10, (s3), v0.t
+; CHECK-NEXT:    sd s6, 16(sp)
+; CHECK-NEXT:    vlm.v v0, (s7)
+; CHECK-NEXT:    addi a0, s3, 3
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 1
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 32
+; CHECK-NEXT:    vl1r.v v10, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vse32.v v10, (a0), v0.t
+; CHECK-NEXT:    sd s6, 16(sp)
+; CHECK-NEXT:    vlm.v v0, (s7)
+; CHECK-NEXT:    addi a0, s3, 6
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 2
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 32
+; CHECK-NEXT:    vl1r.v v10, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vse32.v v10, (a0), v0.t
+; CHECK-NEXT:    sd s6, 16(sp)
+; CHECK-NEXT:    vlm.v v0, (s7)
+; CHECK-NEXT:    addi a0, s3, 9
+; CHECK-NEXT:    li a1, 13
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
+; CHECK-NEXT:    bltu s8, a1, .LBB0_4
+; CHECK-NEXT:  # %bb.2:
+; CHECK-NEXT:    add s8, s3, s8
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vs1r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    addi s3, s3, 24
+; CHECK-NEXT:    addi s9, s2, 12
+; CHECK-NEXT:  .LBB0_3: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    mv s10, s3
+; CHECK-NEXT:    addi s11, s3, -12
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vle8.v v9, (s9)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    vmv.x.s s2, v9
+; CHECK-NEXT:    vslidedown.vi v8, v9, 1
+; CHECK-NEXT:    vmv.x.s s3, v8
+; CHECK-NEXT:    mv a2, s2
+; CHECK-NEXT:    mv a3, s3
+; CHECK-NEXT:    mv a4, s0
+; CHECK-NEXT:    mv a5, s1
+; CHECK-NEXT:    call bar at plt
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 2
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 2
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v9, 3
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 32
+; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT:    vslideup.vi v9, v8, 5
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 32
+; CHECK-NEXT:    vs1r.v v9, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 2
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 32
+; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT:    vslidedown.vi v8, v8, 3
+; CHECK-NEXT:    vslideup.vi v8, v8, 5
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s s4, v8
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s s5, v8
+; CHECK-NEXT:    mv a2, s4
+; CHECK-NEXT:    mv a3, s5
+; CHECK-NEXT:    mv a4, s2
+; CHECK-NEXT:    mv a5, s3
+; CHECK-NEXT:    call bar at plt
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a3, a2, 2
+; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 32
+; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT:    vslidedown.vi v9, v8, 6
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 32
+; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT:    vslideup.vi v9, v8, 2
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a3, a2, 2
+; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 32
+; CHECK-NEXT:    vs1r.v v9, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 2
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 32
+; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT:    vslidedown.vi v8, v8, 6
+; CHECK-NEXT:    vslideup.vi v8, v8, 2
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s s2, v8
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s s3, v8
+; CHECK-NEXT:    mv a2, s2
+; CHECK-NEXT:    mv a3, s3
+; CHECK-NEXT:    mv a4, s4
+; CHECK-NEXT:    mv a5, s5
+; CHECK-NEXT:    call bar at plt
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a1, a0, 2
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-NEXT:    vmv.x.s a1, v8
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vle8.v v8, (s10)
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a3, a2, 2
+; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 32
+; CHECK-NEXT:    vs1r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT:    mv a2, s0
+; CHECK-NEXT:    mv a3, s1
+; CHECK-NEXT:    mv a4, s2
+; CHECK-NEXT:    mv a5, s3
+; CHECK-NEXT:    call bar at plt
+; CHECK-NEXT:    addi a2, sp, 32
+; CHECK-NEXT:    vl1r.v v9, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    sd s6, 16(sp)
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT:    vlm.v v0, (s7)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    add a0, sp, a0
+; CHECK-NEXT:    addi a0, a0, 32
+; CHECK-NEXT:    vl1r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vse32.v v10, (s11), v0.t
+; CHECK-NEXT:    sd s6, 16(sp)
+; CHECK-NEXT:    vlm.v v0, (s7)
+; CHECK-NEXT:    addi a0, s10, -9
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 32
+; CHECK-NEXT:    vl1r.v v10, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vse32.v v10, (a0), v0.t
+; CHECK-NEXT:    sd s6, 16(sp)
+; CHECK-NEXT:    vlm.v v0, (s7)
+; CHECK-NEXT:    addi a0, s10, -6
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 2
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 32
+; CHECK-NEXT:    vl1r.v v10, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vse32.v v10, (a0), v0.t
+; CHECK-NEXT:    sd s6, 16(sp)
+; CHECK-NEXT:    vlm.v v0, (s7)
+; CHECK-NEXT:    addi a0, s10, -3
+; CHECK-NEXT:    vse32.v v8, (a0), v0.t
+; CHECK-NEXT:    addi s3, s10, 12
+; CHECK-NEXT:    addi s9, s9, 12
+; CHECK-NEXT:    bltu s10, s8, .LBB0_3
+; CHECK-NEXT:  .LBB0_4:
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 6
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    ld ra, 136(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 128(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 120(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 112(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s3, 104(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s4, 96(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s5, 88(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s6, 80(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s7, 72(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s8, 64(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s9, 56(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s10, 48(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s11, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 144
+; CHECK-NEXT:    ret
+  %4 = alloca i64, align 8
+  %5 = alloca i64, align 8
+  %6 = alloca i64, align 8
+  %7 = alloca i64, align 8
+  %8 = getelementptr inbounds %struct.png_row_info_struct, ptr %0, i64 0, i32 1
+  %9 = load i64, ptr %8, align 8
+  %10 = getelementptr inbounds i8, ptr %1, i64 %9
+  %11 = icmp sgt i64 %9, 0
+  br i1 %11, label %12, label %240
+
+12:                                               ; preds = %3
+  %13 = tail call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8> poison, ptr %1, i64 16)
+  %14 = tail call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i64(<vscale x 8 x i8> poison, i8 0, i64 8)
+  %15 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %14, i64 0)
+  %16 = bitcast <16 x i8> %15 to <2 x i64>
+  %17 = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> undef, i64 0)
+  %18 = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> poison, i64 0)
+  %19 = tail call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %17, i64 1, i64 8, i64 3)
+  %20 = tail call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> %19, <vscale x 8 x i8> %17, i64 7, i64 8, i64 3)
+  %21 = tail call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %18, i64 1, i64 8, i64 3)
+  %22 = tail call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> %21, <vscale x 8 x i8> %18, i64 7, i64 8, i64 3)
+  %23 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %22, i64 0)
+  %24 = bitcast <16 x i8> %23 to <2 x i64>
+  %25 = extractelement <2 x i64> %24, i64 0
+  %26 = insertvalue [2 x i64] poison, i64 %25, 0
+  %27 = extractelement <2 x i64> %24, i64 1
+  %28 = insertvalue [2 x i64] %26, i64 %27, 1
+  %29 = tail call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8> poison, ptr %2, i64 16)
+  %30 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %29, i64 0)
+  %31 = bitcast <16 x i8> %30 to <2 x i64>
+  %32 = extractelement <2 x i64> %16, i64 0
+  %33 = insertvalue [2 x i64] poison, i64 %32, 0
+  %34 = extractelement <2 x i64> %16, i64 1
+  %35 = insertvalue [2 x i64] %33, i64 %34, 1
+  %36 = extractelement <2 x i64> %31, i64 0
+  %37 = insertvalue [2 x i64] poison, i64 %36, 0
+  %38 = extractelement <2 x i64> %31, i64 1
+  %39 = insertvalue [2 x i64] %37, i64 %38, 1
+  %40 = tail call fastcc [2 x i64] @bar([2 x i64] %35, [2 x i64] %39, [2 x i64] %35)
+  %41 = extractvalue [2 x i64] %40, 0
+  %42 = insertelement <2 x i64> undef, i64 %41, i64 0
+  %43 = extractvalue [2 x i64] %40, 1
+  %44 = insertelement <2 x i64> %42, i64 %43, i64 1
+  %45 = bitcast <2 x i64> %44 to <16 x i8>
+  %46 = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> %45, i64 0)
+  %47 = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %46, <vscale x 8 x i8> %13, i64 8)
+  %48 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %47, i64 0)
+  %49 = bitcast <16 x i8> %48 to <2 x i64>
+  %50 = extractelement <2 x i64> %49, i64 0
+  %51 = insertvalue [2 x i64] poison, i64 %50, 0
+  %52 = extractelement <2 x i64> %49, i64 1
+  %53 = insertvalue [2 x i64] %51, i64 %52, 1
+  %54 = tail call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %13, i64 3, i64 8, i64 3)
+  %55 = tail call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> %54, <vscale x 8 x i8> %17, i64 5, i64 8, i64 3)
+  %56 = tail call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %29, i64 3, i64 8, i64 3)
+  %57 = tail call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> %56, <vscale x 8 x i8> %18, i64 5, i64 8, i64 3)
+  %58 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %57, i64 0)
+  %59 = bitcast <16 x i8> %58 to <2 x i64>
+  %60 = extractelement <2 x i64> %59, i64 0
+  %61 = insertvalue [2 x i64] poison, i64 %60, 0
+  %62 = extractelement <2 x i64> %59, i64 1
+  %63 = insertvalue [2 x i64] %61, i64 %62, 1
+  %64 = tail call fastcc [2 x i64] @bar([2 x i64] %53, [2 x i64] %63, [2 x i64] %39)
+  %65 = extractvalue [2 x i64] %64, 0
+  %66 = insertelement <2 x i64> undef, i64 %65, i64 0
+  %67 = extractvalue [2 x i64] %64, 1
+  %68 = insertelement <2 x i64> %66, i64 %67, i64 1
+  %69 = bitcast <2 x i64> %68 to <16 x i8>
+  %70 = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> %69, i64 0)
+  %71 = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %70, <vscale x 8 x i8> %55, i64 8)
+  %72 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %71, i64 0)
+  %73 = bitcast <16 x i8> %72 to <2 x i64>
+  %74 = extractelement <2 x i64> %73, i64 0
+  %75 = insertvalue [2 x i64] poison, i64 %74, 0
+  %76 = extractelement <2 x i64> %73, i64 1
+  %77 = insertvalue [2 x i64] %75, i64 %76, 1
+  %78 = tail call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %13, i64 6, i64 8, i64 3)
+  %79 = tail call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> %78, <vscale x 8 x i8> %17, i64 2, i64 8, i64 3)
+  %80 = tail call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %29, i64 6, i64 8, i64 3)
+  %81 = tail call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> %80, <vscale x 8 x i8> %18, i64 2, i64 8, i64 3)
+  %82 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %81, i64 0)
+  %83 = bitcast <16 x i8> %82 to <2 x i64>
+  %84 = extractelement <2 x i64> %83, i64 0
+  %85 = insertvalue [2 x i64] poison, i64 %84, 0
+  %86 = extractelement <2 x i64> %83, i64 1
+  %87 = insertvalue [2 x i64] %85, i64 %86, 1
+  %88 = tail call fastcc [2 x i64] @bar([2 x i64] %77, [2 x i64] %87, [2 x i64] %63)
+  %89 = extractvalue [2 x i64] %88, 0
+  %90 = insertelement <2 x i64> undef, i64 %89, i64 0
+  %91 = extractvalue [2 x i64] %88, 1
+  %92 = insertelement <2 x i64> %90, i64 %91, i64 1
+  %93 = bitcast <2 x i64> %92 to <16 x i8>
+  %94 = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> %93, i64 0)
+  %95 = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %94, <vscale x 8 x i8> %79, i64 8)
+  %96 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %95, i64 0)
+  %97 = bitcast <16 x i8> %96 to <2 x i64>
+  %98 = extractelement <2 x i64> %97, i64 0
+  %99 = insertvalue [2 x i64] poison, i64 %98, 0
+  %100 = extractelement <2 x i64> %97, i64 1
+  %101 = insertvalue [2 x i64] %99, i64 %100, 1
+  %102 = getelementptr inbounds i8, ptr %1, i64 12
+  %103 = tail call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8> poison, ptr nonnull %102, i64 16)
+  %104 = tail call fastcc [2 x i64] @bar([2 x i64] %101, [2 x i64] %28, [2 x i64] %87)
+  %105 = extractvalue [2 x i64] %104, 0
+  %106 = insertelement <2 x i64> undef, i64 %105, i64 0
+  %107 = extractvalue [2 x i64] %104, 1
+  %108 = insertelement <2 x i64> %106, i64 %107, i64 1
+  %109 = bitcast <2 x i64> %108 to <16 x i8>
+  %110 = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> %109, i64 0)
+  %111 = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %110, <vscale x 8 x i8> %20, i64 8)
+  %112 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %111, i64 0)
+  %113 = bitcast <16 x i8> %48 to <4 x i32>
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %4)
+  store i64 1, ptr %4, align 8
+  %114 = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1.i64(ptr nonnull %4, i64 2)
+  %115 = tail call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> undef, <4 x i32> %113, i64 0)
+  tail call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> %115, ptr %1, <vscale x 2 x i1> %114, i64 2)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %4)
+  %116 = getelementptr inbounds i8, ptr %1, i64 3
+  %117 = bitcast <16 x i8> %72 to <4 x i32>
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %5)
+  store i64 1, ptr %5, align 8
+  %118 = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1.i64(ptr nonnull %5, i64 2)
+  %119 = tail call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> undef, <4 x i32> %117, i64 0)
+  tail call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> %119, ptr nonnull %116, <vscale x 2 x i1> %118, i64 2)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %5)
+  %120 = getelementptr inbounds i8, ptr %1, i64 6
+  %121 = bitcast <16 x i8> %96 to <4 x i32>
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %6)
+  store i64 1, ptr %6, align 8
+  %122 = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1.i64(ptr nonnull %6, i64 2)
+  %123 = tail call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> undef, <4 x i32> %121, i64 0)
+  tail call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> %123, ptr nonnull %120, <vscale x 2 x i1> %122, i64 2)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %6)
+  %124 = getelementptr inbounds i8, ptr %1, i64 9
+  %125 = bitcast <16 x i8> %112 to <4 x i32>
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %7)
+  store i64 1, ptr %7, align 8
+  %126 = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1.i64(ptr nonnull %7, i64 2)
+  %127 = tail call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> undef, <4 x i32> %125, i64 0)
+  tail call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> %127, ptr nonnull %124, <vscale x 2 x i1> %126, i64 2)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %7)
+  %128 = icmp ugt i64 %9, 12
+  br i1 %128, label %129, label %240
+
+129:                                              ; preds = %12
+  %130 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %103, i64 0)
+  br label %131
+
+131:                                              ; preds = %129, %131
+  %132 = phi ptr [ %102, %129 ], [ %212, %131 ]
+  %133 = phi ptr [ %2, %129 ], [ %137, %131 ]
+  %134 = phi <16 x i8> [ %130, %129 ], [ %214, %131 ]
+  %135 = phi <16 x i8> [ %112, %129 ], [ %223, %131 ]
+  %136 = bitcast <16 x i8> %135 to <2 x i64>
+  %137 = getelementptr inbounds i8, ptr %133, i64 12
+  %138 = tail call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8> poison, ptr nonnull %137, i64 16)
+  %139 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %138, i64 0)
+  %140 = bitcast <16 x i8> %139 to <2 x i64>
+  %141 = extractelement <2 x i64> %136, i64 0
+  %142 = insertvalue [2 x i64] poison, i64 %141, 0
+  %143 = extractelement <2 x i64> %136, i64 1
+  %144 = insertvalue [2 x i64] %142, i64 %143, 1
+  %145 = extractelement <2 x i64> %140, i64 0
+  %146 = insertvalue [2 x i64] poison, i64 %145, 0
+  %147 = extractelement <2 x i64> %140, i64 1
+  %148 = insertvalue [2 x i64] %146, i64 %147, 1
+  %149 = tail call fastcc [2 x i64] @bar([2 x i64] %144, [2 x i64] %148, [2 x i64] %28)
+  %150 = extractvalue [2 x i64] %149, 0
+  %151 = insertelement <2 x i64> undef, i64 %150, i64 0
+  %152 = extractvalue [2 x i64] %149, 1
+  %153 = insertelement <2 x i64> %151, i64 %152, i64 1
+  %154 = bitcast <2 x i64> %153 to <16 x i8>
+  %155 = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> %154, i64 0)
+  %156 = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> %134, i64 0)
+  %157 = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %155, <vscale x 8 x i8> %156, i64 8)
+  %158 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %157, i64 0)
+  %159 = bitcast <16 x i8> %158 to <2 x i64>
+  %160 = extractelement <2 x i64> %159, i64 0
+  %161 = insertvalue [2 x i64] poison, i64 %160, 0
+  %162 = extractelement <2 x i64> %159, i64 1
+  %163 = insertvalue [2 x i64] %161, i64 %162, 1
+  %164 = tail call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %156, i64 3, i64 8, i64 3)
+  %165 = tail call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> %164, <vscale x 8 x i8> %17, i64 5, i64 8, i64 3)
+  %166 = tail call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %138, i64 3, i64 8, i64 3)
+  %167 = tail call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> %166, <vscale x 8 x i8> %18, i64 5, i64 8, i64 3)
+  %168 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %167, i64 0)
+  %169 = bitcast <16 x i8> %168 to <2 x i64>
+  %170 = extractelement <2 x i64> %169, i64 0
+  %171 = insertvalue [2 x i64] poison, i64 %170, 0
+  %172 = extractelement <2 x i64> %169, i64 1
+  %173 = insertvalue [2 x i64] %171, i64 %172, 1
+  %174 = tail call fastcc [2 x i64] @bar([2 x i64] %163, [2 x i64] %173, [2 x i64] %148)
+  %175 = extractvalue [2 x i64] %174, 0
+  %176 = insertelement <2 x i64> undef, i64 %175, i64 0
+  %177 = extractvalue [2 x i64] %174, 1
+  %178 = insertelement <2 x i64> %176, i64 %177, i64 1
+  %179 = bitcast <2 x i64> %178 to <16 x i8>
+  %180 = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> %179, i64 0)
+  %181 = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %180, <vscale x 8 x i8> %165, i64 8)
+  %182 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %181, i64 0)
+  %183 = bitcast <16 x i8> %182 to <2 x i64>
+  %184 = extractelement <2 x i64> %183, i64 0
+  %185 = insertvalue [2 x i64] poison, i64 %184, 0
+  %186 = extractelement <2 x i64> %183, i64 1
+  %187 = insertvalue [2 x i64] %185, i64 %186, 1
+  %188 = tail call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %156, i64 6, i64 8, i64 3)
+  %189 = tail call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> %188, <vscale x 8 x i8> %17, i64 2, i64 8, i64 3)
+  %190 = tail call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %138, i64 6, i64 8, i64 3)
+  %191 = tail call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> %190, <vscale x 8 x i8> %18, i64 2, i64 8, i64 3)
+  %192 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %191, i64 0)
+  %193 = bitcast <16 x i8> %192 to <2 x i64>
+  %194 = extractelement <2 x i64> %193, i64 0
+  %195 = insertvalue [2 x i64] poison, i64 %194, 0
+  %196 = extractelement <2 x i64> %193, i64 1
+  %197 = insertvalue [2 x i64] %195, i64 %196, 1
+  %198 = tail call fastcc [2 x i64] @bar([2 x i64] %187, [2 x i64] %197, [2 x i64] %173)
+  %199 = extractvalue [2 x i64] %198, 0
+  %200 = insertelement <2 x i64> undef, i64 %199, i64 0
+  %201 = extractvalue [2 x i64] %198, 1
+  %202 = insertelement <2 x i64> %200, i64 %201, i64 1
+  %203 = bitcast <2 x i64> %202 to <16 x i8>
+  %204 = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> %203, i64 0)
+  %205 = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %204, <vscale x 8 x i8> %189, i64 8)
+  %206 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %205, i64 0)
+  %207 = bitcast <16 x i8> %206 to <2 x i64>
+  %208 = extractelement <2 x i64> %207, i64 0
+  %209 = insertvalue [2 x i64] poison, i64 %208, 0
+  %210 = extractelement <2 x i64> %207, i64 1
+  %211 = insertvalue [2 x i64] %209, i64 %210, 1
+  %212 = getelementptr inbounds i8, ptr %132, i64 12
+  %213 = tail call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8.i64(<vscale x 8 x i8> poison, ptr nonnull %212, i64 16)
+  %214 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %213, i64 0)
+  %215 = tail call fastcc [2 x i64] @bar([2 x i64] %211, [2 x i64] %28, [2 x i64] %197)
+  %216 = extractvalue [2 x i64] %215, 0
+  %217 = insertelement <2 x i64> undef, i64 %216, i64 0
+  %218 = extractvalue [2 x i64] %215, 1
+  %219 = insertelement <2 x i64> %217, i64 %218, i64 1
+  %220 = bitcast <2 x i64> %219 to <16 x i8>
+  %221 = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v16i8(<vscale x 8 x i8> undef, <16 x i8> %220, i64 0)
+  %222 = tail call <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> %221, <vscale x 8 x i8> %20, i64 8)
+  %223 = tail call <16 x i8> @llvm.vector.extract.v16i8.nxv8i8(<vscale x 8 x i8> %222, i64 0)
+  %224 = bitcast <16 x i8> %158 to <4 x i32>
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %4)
+  store i64 1, ptr %4, align 8
+  %225 = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1.i64(ptr nonnull %4, i64 2)
+  %226 = tail call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> undef, <4 x i32> %224, i64 0)
+  tail call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> %226, ptr nonnull %132, <vscale x 2 x i1> %225, i64 2)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %4)
+  %227 = getelementptr inbounds i8, ptr %132, i64 3
+  %228 = bitcast <16 x i8> %182 to <4 x i32>
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %5)
+  store i64 1, ptr %5, align 8
+  %229 = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1.i64(ptr nonnull %5, i64 2)
+  %230 = tail call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> undef, <4 x i32> %228, i64 0)
+  tail call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> %230, ptr nonnull %227, <vscale x 2 x i1> %229, i64 2)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %5)
+  %231 = getelementptr inbounds i8, ptr %132, i64 6
+  %232 = bitcast <16 x i8> %206 to <4 x i32>
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %6)
+  store i64 1, ptr %6, align 8
+  %233 = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1.i64(ptr nonnull %6, i64 2)
+  %234 = tail call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> undef, <4 x i32> %232, i64 0)
+  tail call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> %234, ptr nonnull %231, <vscale x 2 x i1> %233, i64 2)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %6)
+  %235 = getelementptr inbounds i8, ptr %132, i64 9
+  %236 = bitcast <16 x i8> %223 to <4 x i32>
+  call void @llvm.lifetime.start.p0(i64 8, ptr nonnull %7)
+  store i64 1, ptr %7, align 8
+  %237 = call <vscale x 2 x i1> @llvm.riscv.vlm.nxv2i1.i64(ptr nonnull %7, i64 2)
+  %238 = tail call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v4i32(<vscale x 2 x i32> undef, <4 x i32> %236, i64 0)
+  tail call void @llvm.riscv.vse.mask.nxv2i32.i64(<vscale x 2 x i32> %238, ptr nonnull %235, <vscale x 2 x i1> %237, i64 2)
+  call void @llvm.lifetime.end.p0(i64 8, ptr nonnull %7)
+  %239 = icmp ult ptr %212, %10
+  br i1 %239, label %131, label %240
+
+240:                                              ; preds = %131, %12, %3
+  ret void
+}
+
+; Function Attrs: mustprogress nofree noinline nosync nounwind willreturn memory(none) uwtable vscale_range(2,2)
+declare fastcc [2 x i64] @bar([2 x i64] %0, [2 x i64] %1, [2 x i64] %2)
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(none)
+declare <vscale x 8 x i8> @llvm.riscv.vadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, i64)
+
+; Function Attrs: mustprogress nocallback nofree nosync nounwind willreturn memory(none)
+declare <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i64(<vscale x 8 x i8>, i8, i64)

From f1804a2e143e8b1d79a0fbb52cadc2134acf6d16 Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Thu, 7 Sep 2023 23:20:50 -0700
Subject: [PATCH 2/3] [RISCV] InitUndef also handles undef

Bug report from https://github.com/llvm/llvm-project/issues/65704.

The InitUndef pass misses the pattern where an operand is not defined by an IMPLICIT_DEF but is marked undef directly.

This patch supports this pattern in the InitUndef pass.
---
 llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp   | 27 +++++++++++++++++++
 .../RISCV/65704-illegal-instruction.ll        |  4 +--
 ...regalloc-last-chance-recoloring-failure.ll |  4 +--
 3 files changed, 31 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp b/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
index 7c6a89b6036fa3c..706758a2200115b 100644
--- a/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
@@ -84,6 +84,7 @@ class RISCVInitUndef : public MachineFunctionPass {
   getVRLargestSuperClass(const TargetRegisterClass *RC) const;
   bool handleSubReg(MachineFunction &MF, MachineInstr &MI,
                     const DeadLaneDetector &DLD);
+  bool fixupUndefOperandOnly(MachineInstr *MI);
 };
 
 } // end anonymous namespace
@@ -245,6 +246,30 @@ bool RISCVInitUndef::handleSubReg(MachineFunction &MF, MachineInstr &MI,
   return Changed;
 }
 
+bool RISCVInitUndef::fixupUndefOperandOnly(MachineInstr *MI) {
+  bool Changed = false;
+  for (auto &UseMO : MI->uses()) {
+    if (!UseMO.isReg())
+      continue;
+    if (UseMO.isTied())
+      continue;
+    if (!UseMO.isUndef())
+      continue;
+    if (!isVectorRegClass(UseMO.getReg()))
+      continue;
+    const TargetRegisterClass *TargetRegClass =
+        getVRLargestSuperClass(MRI->getRegClass(UseMO.getReg()));
+    unsigned Opcode = getUndefInitOpcode(TargetRegClass->getID());
+    Register NewReg = MRI->createVirtualRegister(TargetRegClass);
+    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(Opcode), NewReg);
+    UseMO.setReg(NewReg);
+    UseMO.setIsUndef(false);
+    Changed = true;
+  }
+
+  return Changed;
+}
+
 bool RISCVInitUndef::processBasicBlock(MachineFunction &MF,
                                        MachineBasicBlock &MBB,
                                        const DeadLaneDetector &DLD) {
@@ -273,6 +298,8 @@ bool RISCVInitUndef::processBasicBlock(MachineFunction &MF,
 
     if (ST->enableSubRegLiveness() && isEarlyClobberMI(MI))
       Changed |= handleSubReg(MF, MI, DLD);
+    if (isEarlyClobberMI(MI))
+      Changed |= fixupUndefOperandOnly(&MI);
     if (MI.isImplicitDef()) {
       auto DstReg = MI.getOperand(0).getReg();
       if (isVectorRegClass(DstReg))
diff --git a/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll b/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
index 036a55a741a2ee0..ab0513ff34c342e 100644
--- a/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
+++ b/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
@@ -360,7 +360,7 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    addi a2, a2, 32
 ; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 3
-; CHECK-NEXT:    vslideup.vi v8, v8, 5
+; CHECK-NEXT:    vslideup.vi v8, v9, 5
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s s4, v8
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
@@ -414,7 +414,7 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    addi a2, a2, 32
 ; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 6
-; CHECK-NEXT:    vslideup.vi v8, v8, 2
+; CHECK-NEXT:    vslideup.vi v8, v9, 2
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vmv.x.s s2, v8
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 1
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index c15321057aeb86b..8f7923889b99e85 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -117,13 +117,13 @@ define void @last_chance_recoloring_failure() {
 ; SUBREGLIVENESS-NEXT:    vmclr.m v0
 ; SUBREGLIVENESS-NEXT:    li s0, 36
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
-; SUBREGLIVENESS-NEXT:    vfwadd.vv v16, v8, v8, v0.t
+; SUBREGLIVENESS-NEXT:    vfwadd.vv v16, v8, v12, v0.t
 ; SUBREGLIVENESS-NEXT:    addi a0, sp, 16
 ; SUBREGLIVENESS-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
 ; SUBREGLIVENESS-NEXT:    call func at plt
 ; SUBREGLIVENESS-NEXT:    li a0, 32
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; SUBREGLIVENESS-NEXT:    vrgather.vv v16, v8, v8, v0.t
+; SUBREGLIVENESS-NEXT:    vrgather.vv v16, v8, v12, v0.t
 ; SUBREGLIVENESS-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
 ; SUBREGLIVENESS-NEXT:    csrr a1, vlenb
 ; SUBREGLIVENESS-NEXT:    slli a1, a1, 3

From a8ae179dab053e72bac3c73e3b3a879f7f37294f Mon Sep 17 00:00:00 2001
From: Piyou Chen <piyou.chen at sifive.com>
Date: Fri, 8 Sep 2023 05:10:38 -0700
Subject: [PATCH 3/3] [RISCV] Merge handleImplicitDef and fixupUndefOperandOnly

They share the same pattern of replacing the operand with a PseudoRVVInitUndef.

This patch

1. reduces the logic for finding the MachineInstr that needs to be fixed, and
2. emits the PseudoRVVInitUndef just before the using instruction to reduce register pressure (shorter LiveInterval).
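
As a rough illustration of point 2 (simplified MIR, hypothetical virtual registers): previously the init value was materialized at the position of the old IMPLICIT_DEF, which could be far above the early-clobber user; now it is emitted immediately before the user, so the live interval is shorter.

  ; Before: init emitted where the IMPLICIT_DEF used to be
  %1:vr = PseudoRVVInitUndefM1
  ...                               ; intervening instructions keep %1 live
  early-clobber %0:vr = PseudoVRGATHER_VV_M1 %1:vr, %2:vr, ...

  ; After: init emitted right before the user
  ...
  %3:vr = PseudoRVVInitUndefM1
  early-clobber %0:vr = PseudoVRGATHER_VV_M1 %3:vr, %2:vr, ...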
---
 llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp   | 117 +++++++-----------
 .../RISCV/65704-illegal-instruction.ll        |  94 ++++++--------
 ...regalloc-last-chance-recoloring-failure.ll |   4 +-
 .../RISCV/rvv/undef-earlyclobber-chain.mir    |   4 +-
 4 files changed, 82 insertions(+), 137 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp b/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
index 706758a2200115b..2f838392eb6b34d 100644
--- a/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
@@ -77,14 +77,13 @@ class RISCVInitUndef : public MachineFunctionPass {
 private:
   bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB,
                          const DeadLaneDetector &DLD);
-  bool handleImplicitDef(MachineBasicBlock &MBB,
-                         MachineBasicBlock::iterator &Inst);
   bool isVectorRegClass(const Register R);
   const TargetRegisterClass *
   getVRLargestSuperClass(const TargetRegisterClass *RC) const;
   bool handleSubReg(MachineFunction &MF, MachineInstr &MI,
                     const DeadLaneDetector &DLD);
-  bool fixupUndefOperandOnly(MachineInstr *MI);
+  bool fixupIllOperand(MachineInstr *MI, MachineOperand &MO);
+  bool handleReg(MachineInstr *MI);
 };
 
 } // end anonymous namespace
@@ -135,53 +134,32 @@ static bool isEarlyClobberMI(MachineInstr &MI) {
   });
 }
 
-bool RISCVInitUndef::handleImplicitDef(MachineBasicBlock &MBB,
-                                       MachineBasicBlock::iterator &Inst) {
-  assert(Inst->getOpcode() == TargetOpcode::IMPLICIT_DEF);
-
-  Register Reg = Inst->getOperand(0).getReg();
-  if (!Reg.isVirtual())
-    return false;
-
-  bool HasOtherUse = false;
-  SmallVector<MachineOperand *, 1> UseMOs;
-  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
-    if (isEarlyClobberMI(*MO.getParent())) {
-      if (MO.isUse() && !MO.isTied())
-        UseMOs.push_back(&MO);
-      else
-        HasOtherUse = true;
-    }
+static bool findImplictDefMIFromReg(Register Reg, MachineRegisterInfo *MRI) {
+  for (auto &DefMI : MRI->def_instructions(Reg)) {
+    if (DefMI.getOpcode() == TargetOpcode::IMPLICIT_DEF)
+      return true;
   }
+  return false;
+}
 
-  if (UseMOs.empty())
-    return false;
-
-  LLVM_DEBUG(
-      dbgs() << "Emitting PseudoRVVInitUndef for implicit vector register "
-             << Reg << '\n');
-
-  const TargetRegisterClass *TargetRegClass =
-    getVRLargestSuperClass(MRI->getRegClass(Reg));
-  unsigned Opcode = getUndefInitOpcode(TargetRegClass->getID());
-
-  Register NewDest = Reg;
-  if (HasOtherUse) {
-    NewDest = MRI->createVirtualRegister(TargetRegClass);
-    // We don't have a way to update dead lanes, so keep track of the
-    // new register so that we avoid querying it later.
-    NewRegs.insert(NewDest);
-  }
-  BuildMI(MBB, Inst, Inst->getDebugLoc(), TII->get(Opcode), NewDest);
-
-  if (!HasOtherUse)
-    Inst = MBB.erase(Inst);
+bool RISCVInitUndef::handleReg(MachineInstr *MI) {
+  bool Changed = false;
+  for (auto &UseMO : MI->uses()) {
+    if (!UseMO.isReg())
+      continue;
+    if (UseMO.isTied())
+      continue;
+    if (!UseMO.getReg().isVirtual())
+      continue;
+    if (!isVectorRegClass(UseMO.getReg()))
+      continue;
+    if (UseMO.getReg() == 0)
+      continue;
 
-  for (auto MO : UseMOs) {
-    MO->setReg(NewDest);
-    MO->setIsUndef(false);
+    if (UseMO.isUndef() || findImplictDefMIFromReg(UseMO.getReg(), MRI))
+      Changed |= fixupIllOperand(MI, UseMO);
   }
-  return true;
+  return Changed;
 }
 
 bool RISCVInitUndef::handleSubReg(MachineFunction &MF, MachineInstr &MI,
@@ -246,28 +224,21 @@ bool RISCVInitUndef::handleSubReg(MachineFunction &MF, MachineInstr &MI,
   return Changed;
 }
 
-bool RISCVInitUndef::fixupUndefOperandOnly(MachineInstr *MI) {
-  bool Changed = false;
-  for (auto &UseMO : MI->uses()) {
-    if (!UseMO.isReg())
-      continue;
-    if (UseMO.isTied())
-      continue;
-    if (!UseMO.isUndef())
-      continue;
-    if (!isVectorRegClass(UseMO.getReg()))
-      continue;
-    const TargetRegisterClass *TargetRegClass =
-        getVRLargestSuperClass(MRI->getRegClass(UseMO.getReg()));
-    unsigned Opcode = getUndefInitOpcode(TargetRegClass->getID());
-    Register NewReg = MRI->createVirtualRegister(TargetRegClass);
-    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(Opcode), NewReg);
-    UseMO.setReg(NewReg);
-    UseMO.setIsUndef(false);
-    Changed = true;
-  }
+bool RISCVInitUndef::fixupIllOperand(MachineInstr *MI, MachineOperand &MO) {
 
-  return Changed;
+  LLVM_DEBUG(
+      dbgs() << "Emitting PseudoRVVInitUndef for implicit vector register "
+             << MO.getReg() << '\n');
+
+  const TargetRegisterClass *TargetRegClass =
+      getVRLargestSuperClass(MRI->getRegClass(MO.getReg()));
+  unsigned Opcode = getUndefInitOpcode(TargetRegClass->getID());
+  Register NewReg = MRI->createVirtualRegister(TargetRegClass);
+  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(Opcode), NewReg);
+  MO.setReg(NewReg);
+  if (MO.isUndef())
+    MO.setIsUndef(false);
+  return true;
 }
 
 bool RISCVInitUndef::processBasicBlock(MachineFunction &MF,
@@ -296,14 +267,10 @@ bool RISCVInitUndef::processBasicBlock(MachineFunction &MF,
       }
     }
 
-    if (ST->enableSubRegLiveness() && isEarlyClobberMI(MI))
-      Changed |= handleSubReg(MF, MI, DLD);
-    if (isEarlyClobberMI(MI))
-      Changed |= fixupUndefOperandOnly(&MI);
-    if (MI.isImplicitDef()) {
-      auto DstReg = MI.getOperand(0).getReg();
-      if (isVectorRegClass(DstReg))
-        Changed |= handleImplicitDef(MBB, I);
+    if (isEarlyClobberMI(MI)) {
+      if (ST->enableSubRegLiveness())
+        Changed |= handleSubReg(MF, MI, DLD);
+      Changed |= handleReg(&MI);
     }
   }
   return Changed;
diff --git a/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll b/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
index ab0513ff34c342e..ca541daf63a4763 100644
--- a/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
+++ b/llvm/test/CodeGen/RISCV/65704-illegal-instruction.ll
@@ -79,7 +79,8 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a1)
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -96,8 +97,7 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vle8.v v9, (a2)
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a1, a0, 2
-; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
@@ -118,14 +118,14 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a1, a0, 1
-; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -137,13 +137,11 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vslidedown.vi v9, v9, 3
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a2, a2, 1
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 32
 ; CHECK-NEXT:    vs1r.v v9, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a3, a2, 2
-; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    slli a2, a2, 2
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 32
 ; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
@@ -163,13 +161,11 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -179,20 +175,21 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a2, a2, 2
+; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    add a2, a3, a2
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 32
 ; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vslidedown.vi v9, v8, 6
 ; CHECK-NEXT:    vslideup.vi v9, v8, 2
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a2, a2, 2
+; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    add a2, a3, a2
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 32
 ; CHECK-NEXT:    vs1r.v v9, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a3, a2, 2
-; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    slli a2, a2, 2
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 32
 ; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
@@ -212,13 +209,15 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -230,8 +229,7 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a2)
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a3, a2, 2
-; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    slli a2, a2, 2
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 32
 ; CHECK-NEXT:    vs1r.v v8, (a2) # Unknown-size Folded Spill
@@ -253,8 +251,7 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlm.v v0, (s7)
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a1, a0, 1
-; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    slli a0, a0, 1
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    vl1r.v v10, (a0) # Unknown-size Folded Reload
@@ -263,7 +260,6 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vlm.v v0, (s7)
 ; CHECK-NEXT:    addi a0, s3, 3
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 1
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 32
 ; CHECK-NEXT:    vl1r.v v10, (a1) # Unknown-size Folded Reload
@@ -272,7 +268,8 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vlm.v v0, (s7)
 ; CHECK-NEXT:    addi a0, s3, 6
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 2
+; CHECK-NEXT:    slli a2, a1, 1
+; CHECK-NEXT:    add a1, a2, a1
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 32
 ; CHECK-NEXT:    vl1r.v v10, (a1) # Unknown-size Folded Reload
@@ -285,12 +282,6 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    bltu s8, a1, .LBB0_4
 ; CHECK-NEXT:  # %bb.2:
 ; CHECK-NEXT:    add s8, s3, s8
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a1, a0, 1
-; CHECK-NEXT:    add a0, a1, a0
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 32
-; CHECK-NEXT:    vs1r.v v10, (a0) # Unknown-size Folded Spill
 ; CHECK-NEXT:    addi s3, s3, 24
 ; CHECK-NEXT:    addi s9, s2, 12
 ; CHECK-NEXT:  .LBB0_3: # =>This Inner Loop Header: Depth=1
@@ -299,7 +290,8 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vle8.v v9, (s9)
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
@@ -320,14 +312,12 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a1, a0, 2
-; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a1, a0, 2
-; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
@@ -343,19 +333,14 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v9, v9, 3
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a3, a2, 1
-; CHECK-NEXT:    add a2, a3, a2
-; CHECK-NEXT:    add a2, sp, a2
-; CHECK-NEXT:    addi a2, a2, 32
-; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vslideup.vi v9, v8, 5
 ; CHECK-NEXT:    csrr a2, vlenb
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 32
 ; CHECK-NEXT:    vs1r.v v9, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a2, a2, 2
+; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    add a2, a3, a2
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 32
 ; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
@@ -389,27 +374,20 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a3, a2, 2
-; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    slli a2, a2, 2
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 32
 ; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vslidedown.vi v9, v8, 6
-; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a3, a2, 1
-; CHECK-NEXT:    add a2, a3, a2
-; CHECK-NEXT:    add a2, sp, a2
-; CHECK-NEXT:    addi a2, a2, 32
-; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vslideup.vi v9, v8, 2
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a3, a2, 2
-; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    slli a2, a2, 2
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 32
 ; CHECK-NEXT:    vs1r.v v9, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a2, a2, 2
+; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    add a2, a3, a2
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 32
 ; CHECK-NEXT:    vl1r.v v8, (a2) # Unknown-size Folded Reload
@@ -429,14 +407,14 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a1, a0, 2
-; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    vl1r.v v9, (a0) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 2
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -447,8 +425,7 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (s10)
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a3, a2, 2
-; CHECK-NEXT:    add a2, a3, a2
+; CHECK-NEXT:    slli a2, a2, 2
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 32
 ; CHECK-NEXT:    vs1r.v v8, (a2) # Unknown-size Folded Spill
@@ -485,7 +462,8 @@ define dso_local void @foo(ptr nocapture noundef readonly %0, ptr noundef %1, pt
 ; CHECK-NEXT:    vlm.v v0, (s7)
 ; CHECK-NEXT:    addi a0, s10, -6
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 2
+; CHECK-NEXT:    slli a2, a1, 1
+; CHECK-NEXT:    add a1, a2, a1
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 32
 ; CHECK-NEXT:    vl1r.v v10, (a1) # Unknown-size Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index 8f7923889b99e85..b7eac0ba4e4cc9c 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -40,7 +40,7 @@ define void @last_chance_recoloring_failure() {
 ; CHECK-NEXT:    vmclr.m v0
 ; CHECK-NEXT:    li s0, 36
 ; CHECK-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
-; CHECK-NEXT:    vfwadd.vv v16, v8, v8, v0.t
+; CHECK-NEXT:    vfwadd.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add a0, sp, a0
@@ -49,7 +49,7 @@ define void @last_chance_recoloring_failure() {
 ; CHECK-NEXT:    call func@plt
 ; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vrgather.vv v4, v8, v8, v0.t
+; CHECK-NEXT:    vrgather.vv v4, v8, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, s0, e16, m4, ta, ma
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    slli a1, a1, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir
index 08ea967179ebf83..58b2687824aa146 100644
--- a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.mir
@@ -76,9 +76,9 @@ machineFunctionInfo:
 body:             |
   bb.0.entry:
     ; CHECK-LABEL: name: undef_early_clobber_chain
-    ; CHECK: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
-    ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+    ; CHECK: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
     ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 0, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: [[PseudoRVVInitUndefM1_:%[0-9]+]]:vr = PseudoRVVInitUndefM1
     ; CHECK-NEXT: early-clobber %1:vr = PseudoVRGATHER_VI_M1 undef [[DEF]], [[PseudoRVVInitUndefM1_]], 0, 0, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v8 = COPY %1
     ; CHECK-NEXT: PseudoRET implicit $v8


