[llvm] [RISCV] Support llvm.masked.expandload intrinsic (PR #101954)

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 6 04:23:45 PDT 2024


================
@@ -0,0 +1,1313 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -verify-machineinstrs -mtriple=riscv32 -mattr=+v,+d,+m,+zbb %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
+; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+v,+d,+m,+zbb %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
+
+; Load + expand for i8 type
+
+define <1 x i8> @test_expandload_v1i8(ptr %base, <1 x i1> %mask, <1 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <1 x i8> @llvm.masked.expandload.v1i8(ptr align 1 %base, <1 x i1> %mask, <1 x i8> %passthru)
+  ret <1 x i8> %res
+}
+
+define <1 x i8> @test_expandload_v1i8_all_ones(ptr %base, <1 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v1i8_all_ones:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <1 x i8> @llvm.masked.expandload.v1i8(ptr align 1 %base, <1 x i1> splat (i1 true), <1 x i8> %passthru)
+  ret <1 x i8> %res
+}
+
+define <2 x i8> @test_expandload_v2i8(ptr %base, <2 x i1> %mask, <2 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <2 x i8> @llvm.masked.expandload.v2i8(ptr align 1 %base, <2 x i1> %mask, <2 x i8> %passthru)
+  ret <2 x i8> %res
+}
+
+define <2 x i8> @test_expandload_v2i8_all_ones(ptr %base, <2 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v2i8_all_ones:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <2 x i8> @llvm.masked.expandload.v2i8(ptr align 1 %base, <2 x i1> splat (i1 true), <2 x i8> %passthru)
+  ret <2 x i8> %res
+}
+
+define <4 x i8> @test_expandload_v4i8(ptr %base, <4 x i1> %mask, <4 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <4 x i8> @llvm.masked.expandload.v4i8(ptr align 1 %base, <4 x i1> %mask, <4 x i8> %passthru)
+  ret <4 x i8> %res
+}
+
+define <4 x i8> @test_expandload_v4i8_all_ones(ptr %base, <4 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v4i8_all_ones:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <4 x i8> @llvm.masked.expandload.v4i8(ptr align 1 %base, <4 x i1> splat (i1 true), <4 x i8> %passthru)
+  ret <4 x i8> %res
+}
+
+define <8 x i8> @test_expandload_v8i8(ptr %base, <8 x i1> %mask, <8 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <8 x i8> @llvm.masked.expandload.v8i8(ptr align 1 %base, <8 x i1> %mask, <8 x i8> %passthru)
+  ret <8 x i8> %res
+}
+
+define <8 x i8> @test_expandload_v8i8_all_ones(ptr %base, <8 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v8i8_all_ones:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <8 x i8> @llvm.masked.expandload.v8i8(ptr align 1 %base, <8 x i1> splat (i1 true), <8 x i8> %passthru)
+  ret <8 x i8> %res
+}
+
+define <16 x i8> @test_expandload_v16i8(ptr %base, <16 x i1> %mask, <16 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <16 x i8> @llvm.masked.expandload.v16i8(ptr align 1 %base, <16 x i1> %mask, <16 x i8> %passthru)
+  ret <16 x i8> %res
+}
+
+define <16 x i8> @test_expandload_v16i8_all_ones(ptr %base, <16 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v16i8_all_ones:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <16 x i8> @llvm.masked.expandload.v16i8(ptr align 1 %base, <16 x i1> splat (i1 true), <16 x i8> %passthru)
+  ret <16 x i8> %res
+}
+
+define <32 x i8> @test_expandload_v32i8(ptr %base, <32 x i1> %mask, <32 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    viota.m v10, v0
+; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <32 x i8> @llvm.masked.expandload.v32i8(ptr align 1 %base, <32 x i1> %mask, <32 x i8> %passthru)
+  ret <32 x i8> %res
+}
+
+define <32 x i8> @test_expandload_v32i8_all_ones(ptr %base, <32 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v32i8_all_ones:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <32 x i8> @llvm.masked.expandload.v32i8(ptr align 1 %base, <32 x i1> splat (i1 true), <32 x i8> %passthru)
+  ret <32 x i8> %res
+}
+
+define <64 x i8> @test_expandload_v64i8(ptr %base, <64 x i1> %mask, <64 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 64
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT:    viota.m v12, v0
+; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT:    ret
+  %res = call <64 x i8> @llvm.masked.expandload.v64i8(ptr align 1 %base, <64 x i1> %mask, <64 x i8> %passthru)
+  ret <64 x i8> %res
+}
+
+define <64 x i8> @test_expandload_v64i8_all_ones(ptr %base, <64 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v64i8_all_ones:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 64
+; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <64 x i8> @llvm.masked.expandload.v64i8(ptr align 1 %base, <64 x i1> splat (i1 true), <64 x i8> %passthru)
+  ret <64 x i8> %res
+}
+
+define <128 x i8> @test_expandload_v128i8(ptr %base, <128 x i1> %mask, <128 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v128i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 128
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    viota.m v16, v0
+; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+  %res = call <128 x i8> @llvm.masked.expandload.v128i8(ptr align 1 %base, <128 x i1> %mask, <128 x i8> %passthru)
+  ret <128 x i8> %res
+}
+
+define <128 x i8> @test_expandload_v128i8_all_ones(ptr %base, <128 x i8> %passthru) {
+; CHECK-LABEL: test_expandload_v128i8_all_ones:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 128
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <128 x i8> @llvm.masked.expandload.v128i8(ptr align 1 %base, <128 x i1> splat (i1 true), <128 x i8> %passthru)
+  ret <128 x i8> %res
+}
+
+define <256 x i8> @test_expandload_v256i8(ptr %base, <256 x i1> %mask, <256 x i8> %passthru) {
+; CHECK-RV32-LABEL: test_expandload_v256i8:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    csrr a2, vlenb
+; CHECK-RV32-NEXT:    slli a2, a2, 4
+; CHECK-RV32-NEXT:    sub sp, sp, a2
+; CHECK-RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-RV32-NEXT:    addi a2, sp, 16
+; CHECK-RV32-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-RV32-NEXT:    vmv1r.v v9, v0
+; CHECK-RV32-NEXT:    li a2, 128
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-RV32-NEXT:    vle8.v v16, (a1)
+; CHECK-RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-RV32-NEXT:    vslidedown.vi v10, v0, 1
+; CHECK-RV32-NEXT:    li a1, 32
+; CHECK-RV32-NEXT:    vsrl.vx v11, v10, a1
+; CHECK-RV32-NEXT:    vmv.x.s a3, v11
+; CHECK-RV32-NEXT:    vsrl.vx v11, v0, a1
+; CHECK-RV32-NEXT:    vmv.x.s a1, v11
+; CHECK-RV32-NEXT:    vmv.x.s a4, v10
+; CHECK-RV32-NEXT:    vmv.x.s a5, v0
+; CHECK-RV32-NEXT:    cpop a1, a1
+; CHECK-RV32-NEXT:    cpop a5, a5
+; CHECK-RV32-NEXT:    add a1, a5, a1
+; CHECK-RV32-NEXT:    cpop a3, a3
+; CHECK-RV32-NEXT:    cpop a4, a4
+; CHECK-RV32-NEXT:    add a3, a4, a3
+; CHECK-RV32-NEXT:    add a1, a1, a3
+; CHECK-RV32-NEXT:    add a1, a0, a1
+; CHECK-RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-RV32-NEXT:    viota.m v24, v8
+; CHECK-RV32-NEXT:    csrr a2, vlenb
+; CHECK-RV32-NEXT:    slli a2, a2, 3
+; CHECK-RV32-NEXT:    add a2, sp, a2
+; CHECK-RV32-NEXT:    addi a2, a2, 16
+; CHECK-RV32-NEXT:    vs8r.v v24, (a2) # Unknown-size Folded Spill
+; CHECK-RV32-NEXT:    vmv1r.v v0, v8
+; CHECK-RV32-NEXT:    csrr a2, vlenb
+; CHECK-RV32-NEXT:    slli a2, a2, 3
+; CHECK-RV32-NEXT:    add a2, sp, a2
+; CHECK-RV32-NEXT:    addi a2, a2, 16
+; CHECK-RV32-NEXT:    vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-RV32-NEXT:    vluxei8.v v16, (a1), v24, v0.t
+; CHECK-RV32-NEXT:    viota.m v24, v9
+; CHECK-RV32-NEXT:    vmv1r.v v0, v9
+; CHECK-RV32-NEXT:    addi a1, sp, 16
+; CHECK-RV32-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-RV32-NEXT:    vluxei8.v v8, (a0), v24, v0.t
+; CHECK-RV32-NEXT:    csrr a0, vlenb
+; CHECK-RV32-NEXT:    slli a0, a0, 4
+; CHECK-RV32-NEXT:    add sp, sp, a0
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: test_expandload_v256i8:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    addi sp, sp, -16
+; CHECK-RV64-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV64-NEXT:    csrr a2, vlenb
+; CHECK-RV64-NEXT:    slli a2, a2, 4
+; CHECK-RV64-NEXT:    sub sp, sp, a2
+; CHECK-RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-RV64-NEXT:    addi a2, sp, 16
+; CHECK-RV64-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-RV64-NEXT:    vmv1r.v v9, v0
+; CHECK-RV64-NEXT:    li a2, 128
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-RV64-NEXT:    vle8.v v16, (a1)
+; CHECK-RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-RV64-NEXT:    vslidedown.vi v10, v0, 1
+; CHECK-RV64-NEXT:    vmv.x.s a1, v10
+; CHECK-RV64-NEXT:    vmv.x.s a3, v0
+; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-RV64-NEXT:    viota.m v24, v8
+; CHECK-RV64-NEXT:    csrr a2, vlenb
+; CHECK-RV64-NEXT:    slli a2, a2, 3
+; CHECK-RV64-NEXT:    add a2, sp, a2
+; CHECK-RV64-NEXT:    addi a2, a2, 16
+; CHECK-RV64-NEXT:    vs8r.v v24, (a2) # Unknown-size Folded Spill
+; CHECK-RV64-NEXT:    cpop a2, a3
+; CHECK-RV64-NEXT:    cpop a1, a1
+; CHECK-RV64-NEXT:    add a2, a0, a2
+; CHECK-RV64-NEXT:    add a1, a2, a1
+; CHECK-RV64-NEXT:    vmv1r.v v0, v8
+; CHECK-RV64-NEXT:    csrr a2, vlenb
+; CHECK-RV64-NEXT:    slli a2, a2, 3
+; CHECK-RV64-NEXT:    add a2, sp, a2
+; CHECK-RV64-NEXT:    addi a2, a2, 16
+; CHECK-RV64-NEXT:    vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-RV64-NEXT:    vluxei8.v v16, (a1), v24, v0.t
+; CHECK-RV64-NEXT:    viota.m v24, v9
+; CHECK-RV64-NEXT:    vmv1r.v v0, v9
+; CHECK-RV64-NEXT:    addi a1, sp, 16
+; CHECK-RV64-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-RV64-NEXT:    vluxei8.v v8, (a0), v24, v0.t
+; CHECK-RV64-NEXT:    csrr a0, vlenb
+; CHECK-RV64-NEXT:    slli a0, a0, 4
+; CHECK-RV64-NEXT:    add sp, sp, a0
+; CHECK-RV64-NEXT:    addi sp, sp, 16
+; CHECK-RV64-NEXT:    ret
+  %res = call <256 x i8> @llvm.masked.expandload.v256i8(ptr align 1 %base, <256 x i1> %mask, <256 x i8> %passthru)
+  ret <256 x i8> %res
+}
+
+define <256 x i8> @test_expandload_v256i8_all_ones(ptr %base, <256 x i8> %passthru) {
+; CHECK-RV32-LABEL: test_expandload_v256i8_all_ones:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    li a1, 128
+; CHECK-RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-RV32-NEXT:    vmset.m v8
+; CHECK-RV32-NEXT:    li a2, 32
+; CHECK-RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-RV32-NEXT:    vsrl.vx v9, v8, a2
+; CHECK-RV32-NEXT:    vmv.x.s a3, v9
+; CHECK-RV32-NEXT:    cpop a3, a3
+; CHECK-RV32-NEXT:    vmv.x.s a4, v8
+; CHECK-RV32-NEXT:    cpop a4, a4
+; CHECK-RV32-NEXT:    add a3, a4, a3
+; CHECK-RV32-NEXT:    vslidedown.vi v8, v8, 1
+; CHECK-RV32-NEXT:    vsrl.vx v9, v8, a2
+; CHECK-RV32-NEXT:    vmv.x.s a2, v9
+; CHECK-RV32-NEXT:    cpop a2, a2
+; CHECK-RV32-NEXT:    vmv.x.s a4, v8
+; CHECK-RV32-NEXT:    cpop a4, a4
+; CHECK-RV32-NEXT:    add a2, a4, a2
+; CHECK-RV32-NEXT:    add a3, a0, a3
+; CHECK-RV32-NEXT:    add a2, a3, a2
+; CHECK-RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-RV32-NEXT:    vle8.v v16, (a2)
+; CHECK-RV32-NEXT:    vle8.v v8, (a0)
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: test_expandload_v256i8_all_ones:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    li a1, 128
+; CHECK-RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-RV64-NEXT:    vle8.v v8, (a0)
+; CHECK-RV64-NEXT:    vmset.m v16
+; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-RV64-NEXT:    vmv.x.s a2, v16
+; CHECK-RV64-NEXT:    cpop a2, a2
+; CHECK-RV64-NEXT:    vslidedown.vi v16, v16, 1
+; CHECK-RV64-NEXT:    vmv.x.s a3, v16
+; CHECK-RV64-NEXT:    cpop a3, a3
+; CHECK-RV64-NEXT:    add a0, a0, a2
+; CHECK-RV64-NEXT:    add a0, a0, a3
+; CHECK-RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-RV64-NEXT:    vle8.v v16, (a0)
+; CHECK-RV64-NEXT:    ret
+  %res = call <256 x i8> @llvm.masked.expandload.v256i8(ptr align 1 %base, <256 x i1> splat (i1 true), <256 x i8> %passthru)
+  ret <256 x i8> %res
+}
+
+declare <1 x i8> @llvm.masked.expandload.v1i8(ptr, <1 x i1>, <1 x i8>)
+declare <2 x i8> @llvm.masked.expandload.v2i8(ptr, <2 x i1>, <2 x i8>)
+declare <4 x i8> @llvm.masked.expandload.v4i8(ptr, <4 x i1>, <4 x i8>)
+declare <8 x i8> @llvm.masked.expandload.v8i8(ptr, <8 x i1>, <8 x i8>)
+declare <16 x i8> @llvm.masked.expandload.v16i8(ptr, <16 x i1>, <16 x i8>)
+declare <32 x i8> @llvm.masked.expandload.v32i8(ptr, <32 x i1>, <32 x i8>)
+declare <64 x i8> @llvm.masked.expandload.v64i8(ptr, <64 x i1>, <64 x i8>)
+declare <128 x i8> @llvm.masked.expandload.v128i8(ptr, <128 x i1>, <128 x i8>)
+declare <256 x i8> @llvm.masked.expandload.v256i8(ptr, <256 x i1>, <256 x i8>)
+
+; Load + expand for i16 type
+
+define <1 x i16> @test_expandload_v1i16(ptr %base, <1 x i1> %mask, <1 x i16> %passthru) {
+; CHECK-LABEL: test_expandload_v1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vsll.vi v9, v9, 1, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <1 x i16> @llvm.masked.expandload.v1i16(ptr align 2 %base, <1 x i1> %mask, <1 x i16> %passthru)
+  ret <1 x i16> %res
+}
+
+define <1 x i16> @test_expandload_v1i16_all_ones(ptr %base, <1 x i16> %passthru) {
+; CHECK-LABEL: test_expandload_v1i16_all_ones:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <1 x i16> @llvm.masked.expandload.v1i16(ptr align 2 %base, <1 x i1> splat (i1 true), <1 x i16> %passthru)
+  ret <1 x i16> %res
+}
+
+define <2 x i16> @test_expandload_v2i16(ptr %base, <2 x i1> %mask, <2 x i16> %passthru) {
+; CHECK-LABEL: test_expandload_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vsll.vi v9, v9, 1, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
+; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <2 x i16> @llvm.masked.expandload.v2i16(ptr align 2 %base, <2 x i1> %mask, <2 x i16> %passthru)
+  ret <2 x i16> %res
+}
+
+define <2 x i16> @test_expandload_v2i16_all_ones(ptr %base, <2 x i16> %passthru) {
+; CHECK-LABEL: test_expandload_v2i16_all_ones:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <2 x i16> @llvm.masked.expandload.v2i16(ptr align 2 %base, <2 x i1> splat (i1 true), <2 x i16> %passthru)
+  ret <2 x i16> %res
+}
+
+define <4 x i16> @test_expandload_v4i16(ptr %base, <4 x i1> %mask, <4 x i16> %passthru) {
+; CHECK-LABEL: test_expandload_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vsll.vi v9, v9, 1, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <4 x i16> @llvm.masked.expandload.v4i16(ptr align 2 %base, <4 x i1> %mask, <4 x i16> %passthru)
+  ret <4 x i16> %res
+}
+
+define <4 x i16> @test_expandload_v4i16_all_ones(ptr %base, <4 x i16> %passthru) {
+; CHECK-LABEL: test_expandload_v4i16_all_ones:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <4 x i16> @llvm.masked.expandload.v4i16(ptr align 2 %base, <4 x i1> splat (i1 true), <4 x i16> %passthru)
+  ret <4 x i16> %res
+}
+
+define <8 x i16> @test_expandload_v8i16(ptr %base, <8 x i1> %mask, <8 x i16> %passthru) {
+; CHECK-LABEL: test_expandload_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    viota.m v9, v0
+; CHECK-NEXT:    vsll.vi v9, v9, 1, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
+; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.masked.expandload.v8i16(ptr align 2 %base, <8 x i1> %mask, <8 x i16> %passthru)
+  ret <8 x i16> %res
+}
+
+define <8 x i16> @test_expandload_v8i16_all_ones(ptr %base, <8 x i16> %passthru) {
+; CHECK-LABEL: test_expandload_v8i16_all_ones:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <8 x i16> @llvm.masked.expandload.v8i16(ptr align 2 %base, <8 x i1> splat (i1 true), <8 x i16> %passthru)
+  ret <8 x i16> %res
+}
+
+define <16 x i16> @test_expandload_v16i16(ptr %base, <16 x i1> %mask, <16 x i16> %passthru) {
+; CHECK-LABEL: test_expandload_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    viota.m v10, v0
+; CHECK-NEXT:    vsll.vi v10, v10, 1, v0.t
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
+; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <16 x i16> @llvm.masked.expandload.v16i16(ptr align 2 %base, <16 x i1> %mask, <16 x i16> %passthru)
+  ret <16 x i16> %res
+}
+
+define <16 x i16> @test_expandload_v16i16_all_ones(ptr %base, <16 x i16> %passthru) {
+; CHECK-LABEL: test_expandload_v16i16_all_ones:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <16 x i16> @llvm.masked.expandload.v16i16(ptr align 2 %base, <16 x i1> splat (i1 true), <16 x i16> %passthru)
+  ret <16 x i16> %res
+}
+
+define <32 x i16> @test_expandload_v32i16(ptr %base, <32 x i1> %mask, <32 x i16> %passthru) {
+; CHECK-LABEL: test_expandload_v32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT:    viota.m v12, v0
+; CHECK-NEXT:    vsll.vi v12, v12, 1, v0.t
----------------
lukel97 wrote:

Not for this PR, but as a follow-up it would be good to narrow the LMUL of the indices to reduce register pressure.
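
A rough sketch of what that could look like for test_expandload_v32i16, assuming the indices can be computed at e8 (the viota.m values go up to 31 and the shifted byte offsets up to 62, so both fit in a byte), which would let the index vector live at LMUL=2 instead of LMUL=4. Register choices here are illustrative, not what the PR currently emits:

    li a1, 32
    vsetvli zero, a1, e8, m2, ta, ma
    viota.m v12, v0                     # indices at e8/m2 instead of e16/m4
    vsll.vi v12, v12, 1, v0.t           # scale to byte offsets, still fits in e8
    vsetvli zero, zero, e16, m4, ta, mu # same SEW/LMUL ratio, so vl is preserved
    vluxei8.v v8, (a0), v12, v0.t       # indexed load with the narrower index EEW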

https://github.com/llvm/llvm-project/pull/101954

