[llvm] [AArch64] Improve lowering for scalable masked deinterleaving loads (PR #154338)
Cullen Rhodes via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 28 04:03:04 PDT 2025
================
@@ -0,0 +1,460 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+define <vscale x 16 x i8> @foo_ld2_nxv16i8(<vscale x 16 x i1> %mask, ptr %p) {
+; CHECK-LABEL: foo_ld2_nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld2b { z0.b, z1.b }, p0/z, [x0]
+; CHECK-NEXT: add z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> %mask, <vscale x 16 x i1> %mask)
+ %wide.masked.vec = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr %p, i32 1, <vscale x 32 x i1> %interleaved.mask, <vscale x 32 x i8> poison)
+ %deinterleaved.vec = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 1
+ %add = add <vscale x 16 x i8> %part1, %part2
+ ret <vscale x 16 x i8> %add
+}
+
+define <vscale x 8 x i16> @foo_ld2_nxv8i16(<vscale x 8 x i1> %mask, ptr %p) {
+; CHECK-LABEL: foo_ld2_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld2h { z0.h, z1.h }, p0/z, [x0]
+; CHECK-NEXT: add z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 16 x i1> @llvm.vector.interleave2.nxv16i1(<vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask)
+ %wide.masked.vec = call <vscale x 16 x i16> @llvm.masked.load.nxv16i16.p0(ptr %p, i32 2, <vscale x 16 x i1> %interleaved.mask, <vscale x 16 x i16> poison)
+ %deinterleaved.vec = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.vector.deinterleave2.nxv16i16(<vscale x 16 x i16> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16> } %deinterleaved.vec, 1
+ %add = add <vscale x 8 x i16> %part1, %part2
+ ret <vscale x 8 x i16> %add
+}
+
+define <vscale x 4 x float> @foo_ld2_nxv4f32(<vscale x 4 x i1> %mask, ptr %p) {
+; CHECK-LABEL: foo_ld2_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld2w { z0.s, z1.s }, p0/z, [x0]
+; CHECK-NEXT: fadd z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 8 x i1> @llvm.vector.interleave2.nxv8i1(<vscale x 4 x i1> %mask, <vscale x 4 x i1> %mask)
+ %wide.masked.vec = call <vscale x 8 x float> @llvm.masked.load.nxv8f32(ptr %p, i32 4, <vscale x 8 x i1> %interleaved.mask, <vscale x 8 x float> poison)
+ %deinterleaved.vec = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.vector.deinterleave2.nxv8f32(<vscale x 8 x float> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float> } %deinterleaved.vec, 1
+ %add = fadd <vscale x 4 x float> %part1, %part2
+ ret <vscale x 4 x float> %add
+}
+
+define <vscale x 2 x double> @foo_ld2_nxv2f64(<vscale x 2 x i1> %mask, ptr %p) {
+; CHECK-LABEL: foo_ld2_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld2d { z0.d, z1.d }, p0/z, [x0]
+; CHECK-NEXT: fadd z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 4 x i1> @llvm.vector.interleave2.nxv4i1(<vscale x 2 x i1> %mask, <vscale x 2 x i1> %mask)
+ %wide.masked.vec = call <vscale x 4 x double> @llvm.masked.load.nxv4f64(ptr %p, i32 8, <vscale x 4 x i1> %interleaved.mask, <vscale x 4 x double> poison)
+ %deinterleaved.vec = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %deinterleaved.vec, 1
+ %add = fadd <vscale x 2 x double> %part1, %part2
+ ret <vscale x 2 x double> %add
+}
+
+define <vscale x 16 x i8> @foo_ld4_nxv16i8(<vscale x 16 x i1> %mask, ptr %p) {
+; CHECK-LABEL: foo_ld4_nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld4b { z0.b - z3.b }, p0/z, [x0]
+; CHECK-NEXT: add z4.b, z0.b, z1.b
+; CHECK-NEXT: add z0.b, z2.b, z3.b
+; CHECK-NEXT: add z0.b, z4.b, z0.b
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> %mask, <vscale x 16 x i1> %mask, <vscale x 16 x i1> %mask, <vscale x 16 x i1> %mask)
+ %wide.masked.vec = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8(ptr %p, i32 1, <vscale x 64 x i1> %interleaved.mask, <vscale x 64 x i8> poison)
+ %deinterleaved.vec = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 1
+ %part3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 2
+ %part4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 3
+ %add1 = add <vscale x 16 x i8> %part1, %part2
+ %add2 = add <vscale x 16 x i8> %part3, %part4
+ %add3 = add <vscale x 16 x i8> %add1, %add2
+ ret <vscale x 16 x i8> %add3
+}
+
+define <vscale x 8 x i16> @foo_ld4_nxv8i16(<vscale x 8 x i1> %mask, ptr %p) {
+; CHECK-LABEL: foo_ld4_nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld4h { z0.h - z3.h }, p0/z, [x0]
+; CHECK-NEXT: add z4.h, z0.h, z1.h
+; CHECK-NEXT: add z0.h, z2.h, z3.h
+; CHECK-NEXT: add z0.h, z4.h, z0.h
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 32 x i1> @llvm.vector.interleave4.nxv32i1(<vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask)
+ %wide.masked.vec = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr %p, i32 2, <vscale x 32 x i1> %interleaved.mask, <vscale x 32 x i16> poison)
+ %deinterleaved.vec = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.vector.deinterleave4.nxv32i16(<vscale x 32 x i16> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %deinterleaved.vec, 1
+ %part3 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %deinterleaved.vec, 2
+ %part4 = extractvalue { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %deinterleaved.vec, 3
+ %add1 = add <vscale x 8 x i16> %part1, %part2
+ %add2 = add <vscale x 8 x i16> %part3, %part4
+ %add3 = add <vscale x 8 x i16> %add1, %add2
+ ret <vscale x 8 x i16> %add3
+}
+
+define <vscale x 4 x float> @foo_ld4_nxv4f32(<vscale x 4 x i1> %mask, ptr %p) {
+; CHECK-LABEL: foo_ld4_nxv4f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld4w { z0.s - z3.s }, p0/z, [x0]
+; CHECK-NEXT: fadd z4.s, z0.s, z1.s
+; CHECK-NEXT: fadd z0.s, z2.s, z3.s
+; CHECK-NEXT: fadd z0.s, z4.s, z0.s
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 16 x i1> @llvm.vector.interleave4.nxv16i1(<vscale x 4 x i1> %mask, <vscale x 4 x i1> %mask, <vscale x 4 x i1> %mask, <vscale x 4 x i1> %mask)
+ %wide.masked.vec = call <vscale x 16 x float> @llvm.masked.load.nxv16f32(ptr %p, i32 4, <vscale x 16 x i1> %interleaved.mask, <vscale x 16 x float> poison)
+ %deinterleaved.vec = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.vector.deinterleave4.nxv16f32(<vscale x 16 x float> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %deinterleaved.vec, 1
+ %part3 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %deinterleaved.vec, 2
+ %part4 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %deinterleaved.vec, 3
+ %add1 = fadd <vscale x 4 x float> %part1, %part2
+ %add2 = fadd <vscale x 4 x float> %part3, %part4
+ %add3 = fadd <vscale x 4 x float> %add1, %add2
+ ret <vscale x 4 x float> %add3
+}
+
+define <vscale x 2 x double> @foo_ld4_nxv2f64(<vscale x 2 x i1> %mask, ptr %p) {
+; CHECK-LABEL: foo_ld4_nxv2f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld4d { z0.d - z3.d }, p0/z, [x0]
+; CHECK-NEXT: fadd z4.d, z0.d, z1.d
+; CHECK-NEXT: fadd z0.d, z2.d, z3.d
+; CHECK-NEXT: fadd z0.d, z4.d, z0.d
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 8 x i1> @llvm.vector.interleave4.nxv8i1(<vscale x 2 x i1> %mask, <vscale x 2 x i1> %mask, <vscale x 2 x i1> %mask, <vscale x 2 x i1> %mask)
+ %wide.masked.vec = call <vscale x 8 x double> @llvm.masked.load.nxv8f64(ptr %p, i32 8, <vscale x 8 x i1> %interleaved.mask, <vscale x 8 x double> poison)
+ %deinterleaved.vec = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.vector.deinterleave4.nxv8f64(<vscale x 8 x double> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %deinterleaved.vec, 1
+ %part3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %deinterleaved.vec, 2
+ %part4 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %deinterleaved.vec, 3
+ %add1 = fadd <vscale x 2 x double> %part1, %part2
+ %add2 = fadd <vscale x 2 x double> %part3, %part4
+ %add3 = fadd <vscale x 2 x double> %add1, %add2
+ ret <vscale x 2 x double> %add3
+}
+
+define <vscale x 16 x i8> @foo_ld4_nxv16i8_mul_use_of_mask(<vscale x 16 x i1> %mask, ptr %p, ptr %p2) {
+; CHECK-LABEL: foo_ld4_nxv16i8_mul_use_of_mask:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld4b { z0.b - z3.b }, p0/z, [x0]
+; CHECK-NEXT: zip2 p1.b, p0.b, p0.b
+; CHECK-NEXT: zip1 p0.b, p0.b, p0.b
+; CHECK-NEXT: zip2 p2.b, p1.b, p1.b
+; CHECK-NEXT: zip1 p1.b, p1.b, p1.b
+; CHECK-NEXT: zip2 p3.b, p0.b, p0.b
+; CHECK-NEXT: add z4.b, z0.b, z1.b
+; CHECK-NEXT: add z0.b, z2.b, z3.b
+; CHECK-NEXT: movi v1.2d, #0000000000000000
+; CHECK-NEXT: zip1 p0.b, p0.b, p0.b
+; CHECK-NEXT: add z0.b, z4.b, z0.b
+; CHECK-NEXT: st1b { z1.b }, p2, [x1, #3, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p1, [x1, #2, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p3, [x1, #1, mul vl]
+; CHECK-NEXT: st1b { z1.b }, p0, [x1]
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> %mask, <vscale x 16 x i1> %mask, <vscale x 16 x i1> %mask, <vscale x 16 x i1> %mask)
+ %wide.masked.vec = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8(ptr %p, i32 4, <vscale x 64 x i1> %interleaved.mask, <vscale x 64 x i8> poison)
+ %deinterleaved.vec = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 1
+ %part3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 2
+ %part4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 3
+ %add1 = add <vscale x 16 x i8> %part1, %part2
+ %add2 = add <vscale x 16 x i8> %part3, %part4
+ %add3 = add <vscale x 16 x i8> %add1, %add2
+ call void @llvm.masked.store.nxv64i8.p0(<vscale x 64 x i8> zeroinitializer, ptr %p2, i32 1, <vscale x 64 x i1> %interleaved.mask)
+ ret <vscale x 16 x i8> %add3
+}
+
+define <vscale x 16 x i8> @foo_ld4_nxv16i8_mask_of_interleaved_ones(ptr %p) {
+; CHECK-LABEL: foo_ld4_nxv16i8_mask_of_interleaved_ones:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.b
+; CHECK-NEXT: ld4b { z0.b - z3.b }, p0/z, [x0]
+; CHECK-NEXT: add z4.b, z0.b, z1.b
+; CHECK-NEXT: add z0.b, z2.b, z3.b
+; CHECK-NEXT: add z0.b, z4.b, z0.b
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 64 x i1> @llvm.vector.interleave4.nxv64i1(<vscale x 16 x i1> splat(i1 1), <vscale x 16 x i1> splat(i1 1), <vscale x 16 x i1> splat(i1 1), <vscale x 16 x i1> splat(i1 1))
+ %wide.masked.vec = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8(ptr %p, i32 4, <vscale x 64 x i1> %interleaved.mask, <vscale x 64 x i8> poison)
+ %deinterleaved.vec = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 1
+ %part3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 2
+ %part4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 3
+ %add1 = add <vscale x 16 x i8> %part1, %part2
+ %add2 = add <vscale x 16 x i8> %part3, %part4
+ %add3 = add <vscale x 16 x i8> %add1, %add2
+ ret <vscale x 16 x i8> %add3
+}
+
+define <vscale x 16 x i8> @foo_ld4_nxv16i8_mask_of_ones(ptr %p) {
+; CHECK-LABEL: foo_ld4_nxv16i8_mask_of_ones:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.b
+; CHECK-NEXT: ld4b { z0.b - z3.b }, p0/z, [x0]
+; CHECK-NEXT: add z4.b, z0.b, z1.b
+; CHECK-NEXT: add z0.b, z2.b, z3.b
+; CHECK-NEXT: add z0.b, z4.b, z0.b
+; CHECK-NEXT: ret
+ %wide.masked.vec = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8(ptr %p, i32 4, <vscale x 64 x i1> splat(i1 1), <vscale x 64 x i8> poison)
+ %deinterleaved.vec = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 1
+ %part3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 2
+ %part4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 3
+ %add1 = add <vscale x 16 x i8> %part1, %part2
+ %add2 = add <vscale x 16 x i8> %part3, %part4
+ %add3 = add <vscale x 16 x i8> %add1, %add2
+ ret <vscale x 16 x i8> %add3
+}
+
+; Negative tests
+
+define <vscale x 16 x i8> @foo_ld2_nxv16i8_mul_use_of_load(<vscale x 16 x i1> %mask, ptr %p, ptr %p2) {
+; CHECK-LABEL: foo_ld2_nxv16i8_mul_use_of_load:
+; CHECK: // %bb.0:
+; CHECK-NEXT: zip1 p1.b, p0.b, p0.b
+; CHECK-NEXT: zip2 p0.b, p0.b, p0.b
+; CHECK-NEXT: ld1b { z2.b }, p1/z, [x0]
+; CHECK-NEXT: ld1b { z1.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: str z2, [x1]
+; CHECK-NEXT: uzp2 z0.b, z2.b, z1.b
+; CHECK-NEXT: uzp1 z3.b, z2.b, z1.b
+; CHECK-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-NEXT: add z0.b, z3.b, z0.b
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> %mask, <vscale x 16 x i1> %mask)
+ %wide.masked.vec = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr %p, i32 4, <vscale x 32 x i1> %interleaved.mask, <vscale x 32 x i8> poison)
+ %deinterleaved.vec = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 1
+ %add1 = add <vscale x 16 x i8> %part1, %part2
+  call void @llvm.masked.store.nxv32i8.p0(<vscale x 32 x i8> %wide.masked.vec, ptr %p2, i32 1, <vscale x 32 x i1> splat(i1 1))
+ ret <vscale x 16 x i8> %add1
+}
+
+define <vscale x 16 x i8> @foo_ld2_nxv16i8_bad_mask(<vscale x 16 x i1> %mask, <vscale x 16 x i1> %mask2, ptr %p, ptr %p2) {
+; CHECK-LABEL: foo_ld2_nxv16i8_bad_mask:
+; CHECK: // %bb.0:
+; CHECK-NEXT: zip1 p2.b, p0.b, p1.b
+; CHECK-NEXT: zip2 p0.b, p0.b, p1.b
+; CHECK-NEXT: ld1b { z1.b }, p2/z, [x0]
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: uzp2 z2.b, z1.b, z0.b
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: add z0.b, z0.b, z2.b
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> %mask, <vscale x 16 x i1> %mask2)
+ %wide.masked.vec = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr %p, i32 4, <vscale x 32 x i1> %interleaved.mask, <vscale x 32 x i8> poison)
+ %deinterleaved.vec = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 1
+ %add1 = add <vscale x 16 x i8> %part1, %part2
+ ret <vscale x 16 x i8> %add1
+}
+
+define <vscale x 16 x i8> @foo_ld4_nxv16i8_bad_mask2(<vscale x 32 x i1> %mask, ptr %p, ptr %p2) {
+; CHECK-LABEL: foo_ld4_nxv16i8_bad_mask2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: zip1 p2.b, p1.b, p1.b
+; CHECK-NEXT: zip2 p1.b, p1.b, p1.b
+; CHECK-NEXT: zip2 p3.b, p0.b, p0.b
+; CHECK-NEXT: ld1b { z3.b }, p2/z, [x0, #2, mul vl]
+; CHECK-NEXT: zip1 p0.b, p0.b, p0.b
+; CHECK-NEXT: ld1b { z2.b }, p1/z, [x0, #3, mul vl]
+; CHECK-NEXT: ld1b { z0.b }, p3/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p0/z, [x0]
+; CHECK-NEXT: uzp2 z4.b, z3.b, z2.b
+; CHECK-NEXT: uzp1 z2.b, z3.b, z2.b
+; CHECK-NEXT: uzp2 z5.b, z1.b, z0.b
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: uzp2 z1.b, z5.b, z4.b
+; CHECK-NEXT: uzp2 z3.b, z0.b, z2.b
+; CHECK-NEXT: uzp1 z4.b, z5.b, z4.b
+; CHECK-NEXT: uzp1 z0.b, z0.b, z2.b
+; CHECK-NEXT: add z1.b, z3.b, z1.b
+; CHECK-NEXT: add z0.b, z0.b, z4.b
+; CHECK-NEXT: add z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 64 x i1> @llvm.vector.interleave2.nxv64i1(<vscale x 32 x i1> %mask, <vscale x 32 x i1> %mask)
+ %wide.masked.vec = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8(ptr %p, i32 4, <vscale x 64 x i1> %interleaved.mask, <vscale x 64 x i8> poison)
+ %deinterleaved.vec = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 1
+ %part3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 2
+ %part4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 3
+ %add1 = add <vscale x 16 x i8> %part1, %part2
+ %add2 = add <vscale x 16 x i8> %part3, %part4
+ %add3 = add <vscale x 16 x i8> %add1, %add2
+ ret <vscale x 16 x i8> %add3
+}
+
+define <vscale x 16 x i8> @foo_ld2_nxv16i8_bad_mask3(<vscale x 32 x i1> %mask, ptr %p, ptr %p2) {
+; CHECK-LABEL: foo_ld2_nxv16i8_bad_mask3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ld1b { z0.b }, p1/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z1.b }, p0/z, [x0]
+; CHECK-NEXT: uzp2 z2.b, z1.b, z0.b
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: add z0.b, z0.b, z2.b
+; CHECK-NEXT: ret
+ %wide.masked.vec = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr %p, i32 4, <vscale x 32 x i1> %mask, <vscale x 32 x i8> poison)
+ %deinterleaved.vec = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 1
+ %add1 = add <vscale x 16 x i8> %part1, %part2
+ ret <vscale x 16 x i8> %add1
+}
+
+define <vscale x 16 x i8> @foo_ld4_nxv16i8_bad_mask4(<vscale x 16 x i1> %mask, <vscale x 16 x i1> %mask2, ptr %p, ptr %p2) {
+; CHECK-LABEL: foo_ld4_nxv16i8_bad_mask4:
+; CHECK: // %bb.0:
+; CHECK-NEXT: zip1 p2.b, p0.b, p0.b
+; CHECK-NEXT: zip2 p0.b, p0.b, p0.b
+; CHECK-NEXT: zip1 p3.b, p1.b, p1.b
+; CHECK-NEXT: ld1b { z1.b }, p2/z, [x0]
+; CHECK-NEXT: zip2 p1.b, p1.b, p1.b
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1b { z3.b }, p3/z, [x0, #2, mul vl]
+; CHECK-NEXT: ld1b { z2.b }, p1/z, [x0, #3, mul vl]
+; CHECK-NEXT: uzp2 z5.b, z1.b, z0.b
+; CHECK-NEXT: uzp1 z0.b, z1.b, z0.b
+; CHECK-NEXT: uzp2 z4.b, z3.b, z2.b
+; CHECK-NEXT: uzp1 z2.b, z3.b, z2.b
+; CHECK-NEXT: uzp2 z1.b, z5.b, z4.b
+; CHECK-NEXT: uzp2 z3.b, z0.b, z2.b
+; CHECK-NEXT: uzp1 z4.b, z5.b, z4.b
+; CHECK-NEXT: uzp1 z0.b, z0.b, z2.b
+; CHECK-NEXT: add z1.b, z3.b, z1.b
+; CHECK-NEXT: add z0.b, z0.b, z4.b
+; CHECK-NEXT: add z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> %mask, <vscale x 16 x i1> %mask)
+ %interleaved.mask2 = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> %mask2, <vscale x 16 x i1> %mask2)
+ %bad.mask.init = call <vscale x 64 x i1> @llvm.vector.insert.nxv64i1(<vscale x 64 x i1> poison, <vscale x 32 x i1> %interleaved.mask, i64 0)
+ %bad.mask = call <vscale x 64 x i1> @llvm.vector.insert.nxv64i1(<vscale x 64 x i1> %bad.mask.init, <vscale x 32 x i1> %interleaved.mask2, i64 32)
+ %wide.masked.vec = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8(ptr %p, i32 4, <vscale x 64 x i1> %bad.mask, <vscale x 64 x i8> poison)
+ %deinterleaved.vec = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave4.nxv64i8(<vscale x 64 x i8> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 1
+ %part3 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 2
+ %part4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 3
+ %add1 = add <vscale x 16 x i8> %part1, %part2
+ %add2 = add <vscale x 16 x i8> %part3, %part4
+ %add3 = add <vscale x 16 x i8> %add1, %add2
+ ret <vscale x 16 x i8> %add3
+}
+
+define <vscale x 8 x i8> @foo_ld2_nxv8i8(<vscale x 8 x i1> %mask, ptr %p) {
+; CHECK-LABEL: foo_ld2_nxv8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: zip2 p1.h, p0.h, p0.h
+; CHECK-NEXT: zip1 p0.h, p0.h, p0.h
+; CHECK-NEXT: uzp1 p0.b, p0.b, p1.b
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT: uunpkhi z1.h, z0.b
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: uzp2 z2.h, z0.h, z1.h
+; CHECK-NEXT: uzp1 z0.h, z0.h, z1.h
+; CHECK-NEXT: add z0.h, z0.h, z2.h
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 16 x i1> @llvm.vector.interleave2.nxv16i1(<vscale x 8 x i1> %mask, <vscale x 8 x i1> %mask)
+ %wide.masked.vec = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %p, i32 1, <vscale x 16 x i1> %interleaved.mask, <vscale x 16 x i8> poison)
+ %deinterleaved.vec = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.vector.deinterleave2.nxv16i8(<vscale x 16 x i8> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 8 x i8>, <vscale x 8 x i8> } %deinterleaved.vec, 1
+ %add1 = add <vscale x 8 x i8> %part1, %part2
+ ret <vscale x 8 x i8> %add1
+}
+
+define <vscale x 16 x i8> @foo_ld2_nxv16i8_bad_passthru(<vscale x 16 x i1> %mask, ptr %p) {
+; CHECK-LABEL: foo_ld2_nxv16i8_bad_passthru:
+; CHECK: // %bb.0:
+; CHECK-NEXT: zip1 p1.b, p0.b, p0.b
+; CHECK-NEXT: mov z0.b, #3 // =0x3
+; CHECK-NEXT: zip2 p0.b, p0.b, p0.b
+; CHECK-NEXT: ld1b { z2.b }, p1/z, [x0]
+; CHECK-NEXT: ld1b { z1.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: sel z1.b, p0, z1.b, z0.b
+; CHECK-NEXT: mov z0.b, p1/m, z2.b
+; CHECK-NEXT: uzp2 z2.b, z0.b, z1.b
+; CHECK-NEXT: uzp1 z0.b, z0.b, z1.b
+; CHECK-NEXT: add z0.b, z0.b, z2.b
+; CHECK-NEXT: ret
+ %interleaved.mask = call <vscale x 32 x i1> @llvm.vector.interleave2.nxv32i1(<vscale x 16 x i1> %mask, <vscale x 16 x i1> %mask)
+ %wide.masked.vec = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr %p, i32 1, <vscale x 32 x i1> %interleaved.mask, <vscale x 32 x i8> splat(i8 3))
+ %deinterleaved.vec = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.vector.deinterleave2.nxv32i8(<vscale x 32 x i8> %wide.masked.vec)
+ %part1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 0
+ %part2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %deinterleaved.vec, 1
+ %add1 = add <vscale x 16 x i8> %part1, %part2
+ ret <vscale x 16 x i8> %add1
+}
+
+define <vscale x 8 x i16> @foo_deinterleave2_not_load(<vscale x 16 x i16> %vec) {
----------------
c-rhodes wrote:
There's already a test for this in `llvm/test/CodeGen/AArch64/sve-vector-deinterleave.ll`, so this one can be removed. Apologies if this is one you added based on my last set of comments.
https://github.com/llvm/llvm-project/pull/154338