[llvm] [GlobalISel] Import extract/insert subvector (PR #110287)

Thorsten Schütt via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 30 08:49:16 PDT 2024


================
@@ -0,0 +1,206 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -O0 -mtriple=aarch64-linux-gnu -mattr=+sve -global-isel -stop-after=irtranslator -aarch64-enable-gisel-sve=1 %s -o - | FileCheck %s
+
+define i32 @extract_v4i32_vector_insert_const(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+  ; CHECK-LABEL: name: extract_v4i32_vector_insert_const
+  ; CHECK: bb.1.entry:
+  ; CHECK-NEXT:   liveins: $d1, $q0, $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $w0
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; CHECK-NEXT:   [[INSERT_SUBVECTOR:%[0-9]+]]:_(<4 x s32>) = G_INSERT_SUBVECTOR [[COPY]], [[COPY1]](<2 x s32>), 0
+  ; CHECK-NEXT:   [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[INSERT_SUBVECTOR]](<4 x s32>), [[C]](s64)
+  ; CHECK-NEXT:   $w0 = COPY [[EVEC]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
+entry:
+  %vector = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> %a, <2 x i32> %b, i64 0)
+  %d = extractelement <4 x i32> %vector, i32 1
+  ret i32 %d
+}
+
+define i32 @extract_v4i32_vector_insert(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+  ; CHECK-LABEL: name: extract_v4i32_vector_insert
+  ; CHECK: bb.1.entry:
+  ; CHECK-NEXT:   liveins: $d1, $q0, $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $w0
+  ; CHECK-NEXT:   [[INSERT_SUBVECTOR:%[0-9]+]]:_(<4 x s32>) = G_INSERT_SUBVECTOR [[COPY]], [[COPY1]](<2 x s32>), 0
+  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
+  ; CHECK-NEXT:   [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[INSERT_SUBVECTOR]](<4 x s32>), [[ZEXT]](s64)
+  ; CHECK-NEXT:   $w0 = COPY [[EVEC]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
+entry:
+  %vector = call <4 x i32> @llvm.vector.insert.v4i32.v2i32(<4 x i32> %a, <2 x i32> %b, i64 0)
+  %d = extractelement <4 x i32> %vector, i32 %c
+  ret i32 %d
+}
+
+define i32 @extract_v4i32_vector_extract(<4 x i32> %a, <2 x i32> %b, i32 %c) {
+  ; CHECK-LABEL: name: extract_v4i32_vector_extract
+  ; CHECK: bb.1.entry:
+  ; CHECK-NEXT:   liveins: $d1, $q0, $w0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $w0
+  ; CHECK-NEXT:   [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<4 x s32>) = G_EXTRACT_SUBVECTOR [[COPY]](<4 x s32>), 0
+  ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY2]](s32)
+  ; CHECK-NEXT:   [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[EXTRACT_SUBVECTOR]](<4 x s32>), [[ZEXT]](s64)
+  ; CHECK-NEXT:   $w0 = COPY [[EVEC]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
+entry:
+  %vector = call <4 x i32> @llvm.vector.extract.v4i32.v4i32(<4 x i32> %a, i64 0)
+  %d = extractelement <4 x i32> %vector, i32 %c
+  ret i32 %d
+}
+
+define i32 @extract_v4i32_vector_extract_const(<vscale x 4 x i32> %a, i32 %c, ptr %p) {
+  ; CHECK-LABEL: name: extract_v4i32_vector_extract_const
+  ; CHECK: bb.1.entry:
+  ; CHECK-NEXT:   liveins: $w0, $x1, $z0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $w0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x1
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_EXTRACT_SUBVECTOR [[COPY]](<vscale x 4 x s32>), 0
+  ; CHECK-NEXT:   G_STORE [[EXTRACT_SUBVECTOR]](<vscale x 4 x s32>), [[COPY2]](p0) :: (store (<vscale x 4 x s32>) into %ir.p)
+  ; CHECK-NEXT:   $w0 = COPY [[C]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
+entry:
+  %vector = call <vscale x 4 x i32> @llvm.vector.extract(<vscale x 4 x i32> %a, i64 0)
+  store <vscale x 4 x i32> %vector, ptr %p, align 16
+  ret i32 1
+}
+
+define i32 @extract_v4i32_vector_insert_const_vscale(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 %c, ptr %p) {
+  ; CHECK-LABEL: name: extract_v4i32_vector_insert_const_vscale
+  ; CHECK: bb.1.entry:
+  ; CHECK-NEXT:   liveins: $w0, $x1, $z0, $z1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z1
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $w0
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x1
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[INSERT_SUBVECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_SUBVECTOR [[COPY]], [[COPY1]](<vscale x 4 x s32>), 0
+  ; CHECK-NEXT:   G_STORE [[INSERT_SUBVECTOR]](<vscale x 4 x s32>), [[COPY3]](p0) :: (store (<vscale x 4 x s32>) into %ir.p)
+  ; CHECK-NEXT:   $w0 = COPY [[C]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
+entry:
+  %vector = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i64 0)
+  store <vscale x 4 x i32> %vector, ptr %p, align 16
+  ret i32 1
+}
+
+define i32 @extract_v4i32_vector_extract_const_illegal_fixed(<4 x i32> %a, ptr %p) {
+  ; CHECK-LABEL: name: extract_v4i32_vector_extract_const_illegal_fixed
+  ; CHECK: bb.1.entry:
+  ; CHECK-NEXT:   liveins: $q0, $x0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x0
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<4 x s32>), [[C]](s64)
+  ; CHECK-NEXT:   G_STORE [[EVEC]](s32), [[COPY1]](p0) :: (store (s32) into %ir.p, align 16)
+  ; CHECK-NEXT:   $w0 = COPY [[C1]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
+entry:
+  %vector = call <1 x i32> @llvm.vector.extract(<4 x i32> %a, i64 0)
+  store <1 x i32> %vector, ptr %p, align 16
+  ret i32 1
+}
+
+define i32 @extract_v4i32_vector_extract_const_illegal_scalable(<vscale x 4 x i32> %a, ptr %p) {
+  ; CHECK-LABEL: name: extract_v4i32_vector_extract_const_illegal_scalable
+  ; CHECK: bb.1.entry:
+  ; CHECK-NEXT:   liveins: $x0, $z0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x0
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[VSCALE:%[0-9]+]]:_(s64) = G_VSCALE i64 1
+  ; CHECK-NEXT:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[VSCALE]], [[C]]
+  ; CHECK-NEXT:   [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<vscale x 4 x s32>), [[MUL]](s64)
+  ; CHECK-NEXT:   G_STORE [[EVEC]](s32), [[COPY1]](p0) :: (store (s32) into %ir.p, align 16)
+  ; CHECK-NEXT:   $w0 = COPY [[C1]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
+entry:
+  %vector = call <1 x i32> @llvm.vector.extract(<vscale x 4 x i32> %a, i64 0)
+  store <1 x i32> %vector, ptr %p, align 16
+  ret i32 1
+}
+
+define i32 @extract_v4i32_vector_insert_const_illegal_scalable(<vscale x 4 x i32> %a, <1 x i32> %b, i32 %c, ptr %p) {
+  ; CHECK-LABEL: name: extract_v4i32_vector_insert_const_illegal_scalable
+  ; CHECK: bb.1.entry:
+  ; CHECK-NEXT:   liveins: $d1, $w0, $x1, $z0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $z0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $w0
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x1
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[VSCALE:%[0-9]+]]:_(s64) = G_VSCALE i64 1
+  ; CHECK-NEXT:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[VSCALE]], [[C]]
+  ; CHECK-NEXT:   [[IVEC:%[0-9]+]]:_(<vscale x 4 x s32>) = G_INSERT_VECTOR_ELT [[COPY]], [[UV]](s32), [[MUL]](s64)
+  ; CHECK-NEXT:   G_STORE [[IVEC]](<vscale x 4 x s32>), [[COPY3]](p0) :: (store (<vscale x 4 x s32>) into %ir.p)
+  ; CHECK-NEXT:   $w0 = COPY [[C1]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
+entry:
+  %vector = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v1i32(<vscale x 4 x i32> %a, <1 x i32> %b, i64 0)
+  store <vscale x 4 x i32> %vector, ptr %p, align 16
+  ret i32 1
+}
+
+define i32 @extract_v4i32_vector_insert_const_fixed(<4 x i32> %a, <1 x i32> %b, i32 %c, ptr %p) {
+  ; CHECK-LABEL: name: extract_v4i32_vector_insert_const_fixed
+  ; CHECK: bb.1.entry:
+  ; CHECK-NEXT:   liveins: $d1, $q0, $w0, $x1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $w0
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x1
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[IVEC:%[0-9]+]]:_(<4 x s32>) = G_INSERT_VECTOR_ELT [[COPY]], [[UV]](s32), [[C]](s64)
+  ; CHECK-NEXT:   G_STORE [[IVEC]](<4 x s32>), [[COPY3]](p0) :: (store (<4 x s32>) into %ir.p)
+  ; CHECK-NEXT:   $w0 = COPY [[C1]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
+entry:
+  %vector = call <4 x i32> @llvm.vector.insert.v4i32.v1i32(<4 x i32> %a, <1 x i32> %b, i64 0)
+  store <4 x i32> %vector, ptr %p, align 16
+  ret i32 1
+}
+
+define i32 @extract_v4i32_vector_insert_const_fixed_illegal(<1 x i32> %a, <1 x i32> %b, i32 %c, ptr %p) {
+  ; CHECK-LABEL: name: extract_v4i32_vector_insert_const_fixed_illegal
+  ; CHECK: bb.1.entry:
+  ; CHECK-NEXT:   liveins: $d0, $d1, $w0, $x1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
+  ; CHECK-NEXT:   [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $w0
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x1
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
+  ; CHECK-NEXT:   G_STORE [[COPY4]](s32), [[COPY3]](p0) :: (store (s32) into %ir.p, align 16)
+  ; CHECK-NEXT:   $w0 = COPY [[C]](s32)
+  ; CHECK-NEXT:   RET_ReallyLR implicit $w0
+entry:
+  %vector = call <1 x i32> @llvm.vector.insert.v1i32.v1i32(<1 x i32> %a, <1 x i32> %b, i64 0)
+  store <1 x i32> %vector, ptr %p, align 16
+  ret i32 1
+}
----------------
tschuett wrote:

Sure. There are some limitations with scalable vectors.
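
For example, mixed fixed/scalable cases cannot use the subvector opcodes. A minimal sketch of such a case (the function name is illustrative; the lowering described in the comments follows the extract_v4i32_vector_extract_const_illegal_scalable test above):

define <1 x i32> @mixed_extract(<vscale x 4 x i32> %a) {
  ; Extracting a fixed-length subvector from a scalable vector: the
  ; IRTranslator falls back to G_EXTRACT_VECTOR_ELT (with index
  ; vscale * 0) instead of emitting G_EXTRACT_SUBVECTOR.
  %v = call <1 x i32> @llvm.vector.extract.v1i32.nxv4i32(<vscale x 4 x i32> %a, i64 0)
  ret <1 x i32> %v
}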

https://github.com/llvm/llvm-project/pull/110287

