[llvm] e542202 - [RISCV] Update vector calling convention test for tuple type. NFC (#111336)
Brandon Wu via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 8 08:25:50 PDT 2024
Author: Brandon Wu
Date: 2024-10-08T08:25:47-07:00
New Revision: e542202c2763c53c35d1bbe7a3853d683add1303
URL: https://github.com/llvm/llvm-project/commit/e542202c2763c53c35d1bbe7a3853d683add1303
DIFF: https://github.com/llvm/llvm-project/commit/e542202c2763c53c35d1bbe7a3853d683add1303.diff
LOG: [RISCV] Update vector calling convention test for tuple type. NFC (#111336)
Added:
Modified:
llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index e03698eeb97151..b229af5849fe9d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -87,7 +87,7 @@ define <vscale x 32 x i32> @caller_scalable_vector_split_indirect(<vscale x 32 x
ret <vscale x 32 x i32> %a
}
-define {<vscale x 4 x i32>, <vscale x 4 x i32>} @caller_tuple_return() {
+define target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @caller_tuple_return() {
; RV32-LABEL: caller_tuple_return:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
@@ -95,9 +95,9 @@ define {<vscale x 4 x i32>, <vscale x 4 x i32>} @caller_tuple_return() {
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call callee_tuple_return
-; RV32-NEXT: vmv2r.v v12, v8
+; RV32-NEXT: vmv2r.v v6, v8
; RV32-NEXT: vmv2r.v v8, v10
-; RV32-NEXT: vmv2r.v v10, v12
+; RV32-NEXT: vmv2r.v v10, v6
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -109,32 +109,32 @@ define {<vscale x 4 x i32>, <vscale x 4 x i32>} @caller_tuple_return() {
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call callee_tuple_return
-; RV64-NEXT: vmv2r.v v12, v8
+; RV64-NEXT: vmv2r.v v6, v8
; RV64-NEXT: vmv2r.v v8, v10
-; RV64-NEXT: vmv2r.v v10, v12
+; RV64-NEXT: vmv2r.v v10, v6
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
- %a = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
- %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 0
- %c = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 1
- %d = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} poison, <vscale x 4 x i32> %c, 0
- %e = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %d, <vscale x 4 x i32> %b, 1
- ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %e
+ %a = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @callee_tuple_return()
+ %b = call <vscale x 4 x i32> @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %a, i32 0)
+ %c = call <vscale x 4 x i32> @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %a, i32 1)
+ %d = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, <vscale x 4 x i32> %c, i32 0)
+ %e = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %d, <vscale x 4 x i32> %b, i32 1)
+ ret target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %e
}
-declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple_return()
+declare target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @callee_tuple_return()
-define void @caller_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %x) {
+define void @caller_tuple_argument(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %x) {
; RV32-LABEL: caller_tuple_argument:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: vmv2r.v v12, v8
+; RV32-NEXT: vmv2r.v v6, v8
; RV32-NEXT: vmv2r.v v8, v10
-; RV32-NEXT: vmv2r.v v10, v12
+; RV32-NEXT: vmv2r.v v10, v6
; RV32-NEXT: call callee_tuple_argument
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
@@ -146,19 +146,19 @@ define void @caller_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %x)
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: vmv2r.v v12, v8
+; RV64-NEXT: vmv2r.v v6, v8
; RV64-NEXT: vmv2r.v v8, v10
-; RV64-NEXT: vmv2r.v v10, v12
+; RV64-NEXT: vmv2r.v v10, v6
; RV64-NEXT: call callee_tuple_argument
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
- %a = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 0
- %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %x, 1
- %c = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} poison, <vscale x 4 x i32> %b, 0
- %d = insertvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %c, <vscale x 4 x i32> %a, 1
- call void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %d)
+ %a = call <vscale x 4 x i32> @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %x, i32 0)
+ %b = call <vscale x 4 x i32> @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %x, i32 1)
+ %c = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) poison, <vscale x 4 x i32> %b, i32 0)
+ %d = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %c, <vscale x 4 x i32> %a, i32 1)
+ call void @callee_tuple_argument(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %d)
ret void
}
-declare void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>})
+declare void @callee_tuple_argument(target("riscv.vector.tuple", <vscale x 16 x i8>, 2))
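For context, the updated test models the two-element tuple with the dedicated target("riscv.vector.tuple", <vscale x 16 x i8>, 2) type, manipulated through the llvm.riscv.tuple.insert/extract intrinsics, instead of a literal {<vscale x 4 x i32>, <vscale x 4 x i32>} struct manipulated with insertvalue/extractvalue. A minimal standalone sketch of the new pattern, using the same types and intrinsics exercised in the diff above (the function name @round_trip_field0 is made up for illustration, and the intrinsic declarations are written out so the snippet is self-contained):

; Minimal sketch (not part of the commit): insert a vector into field 0 of a
; 2-element tuple, then read that field back.
declare target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), <vscale x 4 x i32>, i32)
declare <vscale x 4 x i32> @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), i32)

define <vscale x 4 x i32> @round_trip_field0(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %t, <vscale x 4 x i32> %v) {
  %u = call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv16i8_2t.nxv4i32(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %t, <vscale x 4 x i32> %v, i32 0)
  %r = call <vscale x 4 x i32> @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %u, i32 0)
  ret <vscale x 4 x i32> %r
}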