[llvm] ed93e6b - [RISCV] Added test for dag spill fix

Mikhail R. Gadelha via llvm-commits llvm-commits at lists.llvm.org
Sat Mar 8 09:56:07 PST 2025


Author: Mikhail R. Gadelha
Date: 2025-03-08T14:55:20-03:00
New Revision: ed93e6b0e6bb83a356ad3a34f9e6c989db7b59f3

URL: https://github.com/llvm/llvm-project/commit/ed93e6b0e6bb83a356ad3a34f9e6c989db7b59f3
DIFF: https://github.com/llvm/llvm-project/commit/ed93e6b0e6bb83a356ad3a34f9e6c989db7b59f3.diff

LOG: [RISCV] Added test for dag spill fix

Added: 
    llvm/test/CodeGen/RISCV/stores-of-loads-merging.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/stores-of-loads-merging.ll b/llvm/test/CodeGen/RISCV/stores-of-loads-merging.ll
new file mode 100644
index 0000000000000..b2be401b4676f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/stores-of-loads-merging.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
+
+declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
+declare void @g()
+
+define void @f(ptr %m, ptr %n, ptr %p, ptr %q, ptr %r, ptr %s, double %t) {
+; CHECK-LABEL: f:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -48
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset ra, -8
+; CHECK-NEXT:    .cfi_offset s0, -16
+; CHECK-NEXT:    .cfi_offset s1, -24
+; CHECK-NEXT:    .cfi_offset s2, -32
+; CHECK-NEXT:    csrr a6, vlenb
+; CHECK-NEXT:    sub sp, sp, a6
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 1 * vlenb
+; CHECK-NEXT:    mv s0, a5
+; CHECK-NEXT:    mv s1, a4
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a1)
+; CHECK-NEXT:    vle64.v v8, (a2)
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT:    mv s2, a3
+; CHECK-NEXT:    call g
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vse64.v v8, (s2)
+; CHECK-NEXT:    vle64.v v8, (s1)
+; CHECK-NEXT:    vse64.v v8, (s0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    .cfi_def_cfa sp, 48
+; CHECK-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore ra
+; CHECK-NEXT:    .cfi_restore s0
+; CHECK-NEXT:    .cfi_restore s1
+; CHECK-NEXT:    .cfi_restore s2
+; CHECK-NEXT:    addi sp, sp, 48
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
+; CHECK-NEXT:    ret
+  %z0 = load i64, ptr %m
+  %m.1 = getelementptr i64, ptr %m, i64 1
+  %z1 = load i64, ptr %m.1
+  store i64 %z0, ptr %n
+  %n.1 = getelementptr i64, ptr %n, i64 1
+  store i64 %z1, ptr %n.1
+
+  %x0 = load i64, ptr %p
+  %p.1 = getelementptr i64, ptr %p, i64 1
+  %x1 = load i64, ptr %p.1
+  call void @g()
+  store i64 %x0, ptr %q
+  %q.1 = getelementptr i64, ptr %q, i64 1
+  store i64 %x1, ptr %q.1
+
+  %y0 = load i64, ptr %r
+  %r.1 = getelementptr i64, ptr %r, i64 1
+  %y1 = load i64, ptr %r.1
+  store i64 %y0, ptr %s
+  %s.1 = getelementptr i64, ptr %s, i64 1
+  store i64 %y1, ptr %s.1
+
+  ret void
+}
+
+define void @f1(ptr %m, ptr %n, ptr %p, ptr %q, ptr %r, ptr %s, double %t) {
+; CHECK-LABEL: f1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a2)
+; CHECK-NEXT:    fcvt.wu.d a0, fa0, rtz
+; CHECK-NEXT:    vse64.v v8, (a3)
+; CHECK-NEXT:    ret
+  %x0 = load i64, ptr %p
+  %p.1 = getelementptr i64, ptr %p, i64 1
+  %x1 = load i64, ptr %p.1
+  %t1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %t, metadata !"fpexcept.strict")
+  store i64 %x0, ptr %q
+  %q.1 = getelementptr i64, ptr %q, i64 1
+  store i64 %x1, ptr %q.1
+
+  ret void
+}


        


More information about the llvm-commits mailing list