[llvm] 6f4794f - [RISCV] Add a test case showing incorrect call-conv lowering
Fraser Cormack via llvm-commits
llvm-commits@lists.llvm.org
Thu May 27 09:04:34 PDT 2021
Author: Fraser Cormack
Date: 2021-05-27T16:55:48+01:00
New Revision: 6f4794feb60a9deb939873118a7182a8ea87732e
URL: https://github.com/llvm/llvm-project/commit/6f4794feb60a9deb939873118a7182a8ea87732e
DIFF: https://github.com/llvm/llvm-project/commit/6f4794feb60a9deb939873118a7182a8ea87732e.diff
LOG: [RISCV] Add a test case showing incorrect call-conv lowering
@HsiangKai helped find a bug in the lowering of scalable-vector types that
are split and passed indirectly in our calling convention. An imminent patch
will fix this.
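For context: a <vscale x 32 x i32> occupies 32 x vscale x 4 bytes, i.e. 16
RVV registers, so it is split into two LMUL=8 halves passed indirectly
through a single pointer. The second half therefore lives 8 x vlenb bytes
past the first, not at a fixed byte offset. A sketch of the address
computation the callee ought to emit (an illustration of the intended fix,
not code from this commit; register choices are illustrative):

    csrr  a1, vlenb      # vlenb = VLEN/8, bytes per vector register
    slli  a1, a1, 3      # 8 registers * vlenb bytes = offset of second half
    add   a1, a0, a1     # a0 holds the indirect base pointer
    vl8re32.v v24, (a0)  # load first half
    vl8re32.v v0, (a1)   # load second half, 8*vlenb past the first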
Added:
llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
new file mode 100644
index 0000000000000..7b4235e66f791
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -0,0 +1,87 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+m,+experimental-v < %s | FileCheck %s --check-prefix=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+experimental-v < %s | FileCheck %s --check-prefix=RV64
+
+; Check that we correctly scale the indirect offsets of the split parts by
+; VSCALE. FIXME: We don't; we load the second group of 8 vector registers
+; from a fixed 64 bytes past the first address. That offset should be scaled
+define <vscale x 32 x i32> @callee_scalable_vector_split_indirect(<vscale x 32 x i32> %x, <vscale x 32 x i32> %y) {
+; RV32-LABEL: callee_scalable_vector_split_indirect:
+; RV32: # %bb.0:
+; RV32-NEXT: addi a1, a0, 64
+; RV32-NEXT: vl8re32.v v24, (a0)
+; RV32-NEXT: vl8re32.v v0, (a1)
+; RV32-NEXT: vsetvli a0, zero, e32,m8,ta,mu
+; RV32-NEXT: vadd.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v16, v16, v0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: callee_scalable_vector_split_indirect:
+; RV64: # %bb.0:
+; RV64-NEXT: addi a1, a0, 64
+; RV64-NEXT: vl8re32.v v24, (a0)
+; RV64-NEXT: vl8re32.v v0, (a1)
+; RV64-NEXT: vsetvli a0, zero, e32,m8,ta,mu
+; RV64-NEXT: vadd.vv v8, v8, v24
+; RV64-NEXT: vadd.vv v16, v16, v0
+; RV64-NEXT: ret
+ %a = add <vscale x 32 x i32> %x, %y
+ ret <vscale x 32 x i32> %a
+}
+
+; Call the function above. Check that we pass the split argument parts at
+; correctly scaled offsets. FIXME: We don't; see above.
+define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x i32> %x) {
+; RV32-LABEL: caller_scalable_vector_split_indirect:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: addi a0, sp, 96
+; RV32-NEXT: vs8r.v v16, (a0)
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs8r.v v8, (a0)
+; RV32-NEXT: vsetvli a0, zero, e32,m8,ta,mu
+; RV32-NEXT: vmv.v.i v8, 0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vmv8r.v v16, v8
+; RV32-NEXT: call callee_scalable_vector_split_indirect@plt
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: caller_scalable_vector_split_indirect:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -32
+; RV64-NEXT: .cfi_def_cfa_offset 32
+; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: sub sp, sp, a0
+; RV64-NEXT: addi a0, sp, 88
+; RV64-NEXT: vs8r.v v16, (a0)
+; RV64-NEXT: addi a0, sp, 24
+; RV64-NEXT: vs8r.v v8, (a0)
+; RV64-NEXT: vsetvli a0, zero, e32,m8,ta,mu
+; RV64-NEXT: vmv.v.i v8, 0
+; RV64-NEXT: addi a0, sp, 24
+; RV64-NEXT: vmv8r.v v16, v8
+; RV64-NEXT: call callee_scalable_vector_split_indirect@plt
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 32
+; RV64-NEXT: ret
+ %c = alloca i64
+ %a = call <vscale x 32 x i32> @callee_scalable_vector_split_indirect(<vscale x 32 x i32> zeroinitializer, <vscale x 32 x i32> %x)
+ ret <vscale x 32 x i32> %a
+}
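The caller above has the mirror-image problem flagged by the second FIXME: it
spills the two halves to the stack at fixed offsets (sp+32 and sp+96 on RV32,
64 bytes apart), while the callee must find the second half 8 x vlenb bytes
past the first. A sketch of a correct caller-side store sequence, assuming
the RV32 frame layout above (illustrative only, not code from this commit):

    addi  a0, sp, 32     # address of the first half
    vs8r.v v8, (a0)      # store first half
    csrr  a1, vlenb
    slli  a1, a1, 3      # 8 * vlenb bytes
    add   a1, a0, a1     # second half goes 8*vlenb past the first
    vs8r.v v16, (a1)     # store second half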