[llvm] 6251b6b - [AArch64] Add tests with sext of vec3 loads.

Florian Hahn via llvm-commits <llvm-commits@lists.llvm.org>
Tue Jan 30 05:22:24 PST 2024


Author: Florian Hahn
Date: 2024-01-30T13:21:51Z
New Revision: 6251b6bd8d219fe2d99d125095622566721fe6f4

URL: https://github.com/llvm/llvm-project/commit/6251b6bd8d219fe2d99d125095622566721fe6f4
DIFF: https://github.com/llvm/llvm-project/commit/6251b6bd8d219fe2d99d125095622566721fe6f4.diff

LOG: [AArch64] Add tests with sext of vec3 loads.

Another round of additional tests for
https://github.com/llvm/llvm-project/pull/78637
with different sext/zext and use variants.

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll b/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
index 21079ef77877..f2cfa6a842ce 100644
--- a/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
+++ b/llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
@@ -345,6 +345,86 @@ define <3 x i32> @load_v3i32(ptr %src) {
   ret <3 x i32> %l
 }
 
+define <3 x i32> @load_v3i8_zext_to_3xi32(ptr %src) {
+; CHECK-LABEL: load_v3i8_zext_to_3xi32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldrh w8, [x0]
+; CHECK-NEXT:    movi.2d v1, #0x0000ff000000ff
+; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    add x8, x0, #2
+; CHECK-NEXT:    ldr s0, [sp, #12]
+; CHECK-NEXT:    ushll.8h v0, v0, #0
+; CHECK-NEXT:    ld1.b { v0 }[4], [x8]
+; CHECK-NEXT:    ushll.4s v0, v0, #0
+; CHECK-NEXT:    and.16b v0, v0, v1
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+;
+; BE-LABEL: load_v3i8_zext_to_3xi32:
+; BE:       // %bb.0:
+; BE-NEXT:    sub sp, sp, #16
+; BE-NEXT:    .cfi_def_cfa_offset 16
+; BE-NEXT:    ldrh w8, [x0]
+; BE-NEXT:    movi v1.2d, #0x0000ff000000ff
+; BE-NEXT:    strh w8, [sp, #12]
+; BE-NEXT:    add x8, x0, #2
+; BE-NEXT:    ldr s0, [sp, #12]
+; BE-NEXT:    rev32 v0.8b, v0.8b
+; BE-NEXT:    ushll v0.8h, v0.8b, #0
+; BE-NEXT:    ld1 { v0.b }[4], [x8]
+; BE-NEXT:    ushll v0.4s, v0.4h, #0
+; BE-NEXT:    and v0.16b, v0.16b, v1.16b
+; BE-NEXT:    rev64 v0.4s, v0.4s
+; BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; BE-NEXT:    add sp, sp, #16
+; BE-NEXT:    ret
+  %l = load <3 x i8>, ptr %src, align 1
+  %e = zext <3 x i8> %l to <3 x i32>
+  ret <3 x i32> %e
+}
+
+define <3 x i32> @load_v3i8_sext_to_3xi32(ptr %src) {
+; CHECK-LABEL: load_v3i8_sext_to_3xi32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldrh w8, [x0]
+; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    add x8, x0, #2
+; CHECK-NEXT:    ldr s0, [sp, #12]
+; CHECK-NEXT:    ushll.8h v0, v0, #0
+; CHECK-NEXT:    ld1.b { v0 }[4], [x8]
+; CHECK-NEXT:    ushll.4s v0, v0, #0
+; CHECK-NEXT:    shl.4s v0, v0, #24
+; CHECK-NEXT:    sshr.4s v0, v0, #24
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+;
+; BE-LABEL: load_v3i8_sext_to_3xi32:
+; BE:       // %bb.0:
+; BE-NEXT:    sub sp, sp, #16
+; BE-NEXT:    .cfi_def_cfa_offset 16
+; BE-NEXT:    ldrh w8, [x0]
+; BE-NEXT:    strh w8, [sp, #12]
+; BE-NEXT:    add x8, x0, #2
+; BE-NEXT:    ldr s0, [sp, #12]
+; BE-NEXT:    rev32 v0.8b, v0.8b
+; BE-NEXT:    ushll v0.8h, v0.8b, #0
+; BE-NEXT:    ld1 { v0.b }[4], [x8]
+; BE-NEXT:    ushll v0.4s, v0.4h, #0
+; BE-NEXT:    shl v0.4s, v0.4s, #24
+; BE-NEXT:    sshr v0.4s, v0.4s, #24
+; BE-NEXT:    rev64 v0.4s, v0.4s
+; BE-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; BE-NEXT:    add sp, sp, #16
+; BE-NEXT:    ret
+  %l = load <3 x i8>, ptr %src, align 1
+  %e = sext <3 x i8> %l to <3 x i32>
+  ret <3 x i32> %e
+}
+
 define void @store_trunc_from_64bits(ptr %src, ptr %dst) {
 ; CHECK-LABEL: store_trunc_from_64bits:
 ; CHECK:       ; %bb.0: ; %entry
@@ -388,9 +468,9 @@ define void @store_trunc_add_from_64bits(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    add x9, x0, #4
 ; CHECK-NEXT:  Lloh0:
-; CHECK-NEXT:    adrp x8, lCPI9_0@PAGE
+; CHECK-NEXT:    adrp x8, lCPI11_0@PAGE
 ; CHECK-NEXT:  Lloh1:
-; CHECK-NEXT:    ldr d1, [x8, lCPI9_0@PAGEOFF]
+; CHECK-NEXT:    ldr d1, [x8, lCPI11_0@PAGEOFF]
 ; CHECK-NEXT:    add x8, x1, #1
 ; CHECK-NEXT:    ld1.h { v0 }[2], [x9]
 ; CHECK-NEXT:    add x9, x1, #2
@@ -409,8 +489,8 @@ define void @store_trunc_add_from_64bits(ptr %src, ptr %dst) {
 ; BE-NEXT:    add x8, x0, #4
 ; BE-NEXT:    rev32 v0.4h, v0.4h
 ; BE-NEXT:    ld1 { v0.h }[2], [x8]
-; BE-NEXT:    adrp x8, .LCPI9_0
-; BE-NEXT:    add x8, x8, :lo12:.LCPI9_0
+; BE-NEXT:    adrp x8, .LCPI11_0
+; BE-NEXT:    add x8, x8, :lo12:.LCPI11_0
 ; BE-NEXT:    ld1 { v1.4h }, [x8]
 ; BE-NEXT:    add v0.4h, v0.4h, v1.4h
 ; BE-NEXT:    xtn v1.8b, v0.8h
@@ -538,9 +618,9 @@ define void @load_ext_add_to_64bits(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldrh w9, [x0]
 ; CHECK-NEXT:  Lloh2:
-; CHECK-NEXT:    adrp x8, lCPI13_0@PAGE
+; CHECK-NEXT:    adrp x8, lCPI15_0@PAGE
 ; CHECK-NEXT:  Lloh3:
-; CHECK-NEXT:    ldr d1, [x8, lCPI13_0@PAGEOFF]
+; CHECK-NEXT:    ldr d1, [x8, lCPI15_0@PAGEOFF]
 ; CHECK-NEXT:    add x8, x1, #4
 ; CHECK-NEXT:    strh w9, [sp, #12]
 ; CHECK-NEXT:    add x9, x0, #2
@@ -566,8 +646,8 @@ define void @load_ext_add_to_64bits(ptr %src, ptr %dst) {
 ; BE-NEXT:    rev32 v0.8b, v0.8b
 ; BE-NEXT:    ushll v0.8h, v0.8b, #0
 ; BE-NEXT:    ld1 { v0.b }[4], [x8]
-; BE-NEXT:    adrp x8, .LCPI13_0
-; BE-NEXT:    add x8, x8, :lo12:.LCPI13_0
+; BE-NEXT:    adrp x8, .LCPI15_0
+; BE-NEXT:    add x8, x8, :lo12:.LCPI15_0
 ; BE-NEXT:    ld1 { v1.4h }, [x8]
 ; BE-NEXT:    add x8, x1, #4
 ; BE-NEXT:    bic v0.4h, #255, lsl #8
@@ -796,3 +876,115 @@ define void @shift_trunc_volatile_store(ptr %src, ptr %dst) {
   store volatile <3 x i8> %t, ptr %dst, align 1
   ret void
 }
+
+define void @load_v3i8_zext_to_3xi32_add_trunc_store(ptr %src) {
+; CHECK-LABEL: load_v3i8_zext_to_3xi32_add_trunc_store:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldrh w9, [x0]
+; CHECK-NEXT:  Lloh4:
+; CHECK-NEXT:    adrp x8, lCPI22_0@PAGE
+; CHECK-NEXT:  Lloh5:
+; CHECK-NEXT:    ldr q1, [x8, lCPI22_0@PAGEOFF]
+; CHECK-NEXT:    add x8, x0, #1
+; CHECK-NEXT:    strh w9, [sp, #12]
+; CHECK-NEXT:    add x9, x0, #2
+; CHECK-NEXT:    ldr s0, [sp, #12]
+; CHECK-NEXT:    ushll.8h v0, v0, #0
+; CHECK-NEXT:    ld1.b { v0 }[4], [x9]
+; CHECK-NEXT:    uaddw.4s v0, v1, v0
+; CHECK-NEXT:    st1.b { v0 }[4], [x8]
+; CHECK-NEXT:    st1.b { v0 }[8], [x9]
+; CHECK-NEXT:    st1.b { v0 }[0], [x0]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh4, Lloh5
+;
+; BE-LABEL: load_v3i8_zext_to_3xi32_add_trunc_store:
+; BE:       // %bb.0:
+; BE-NEXT:    sub sp, sp, #16
+; BE-NEXT:    .cfi_def_cfa_offset 16
+; BE-NEXT:    ldrh w9, [x0]
+; BE-NEXT:    adrp x8, .LCPI22_0
+; BE-NEXT:    add x8, x8, :lo12:.LCPI22_0
+; BE-NEXT:    ld1 { v1.4h }, [x8]
+; BE-NEXT:    strh w9, [sp, #12]
+; BE-NEXT:    add x9, x0, #2
+; BE-NEXT:    ldr s0, [sp, #12]
+; BE-NEXT:    rev32 v0.8b, v0.8b
+; BE-NEXT:    ushll v0.8h, v0.8b, #0
+; BE-NEXT:    ld1 { v0.b }[4], [x9]
+; BE-NEXT:    add v0.4h, v0.4h, v1.4h
+; BE-NEXT:    xtn v1.8b, v0.8h
+; BE-NEXT:    umov w8, v0.h[2]
+; BE-NEXT:    rev32 v1.16b, v1.16b
+; BE-NEXT:    str s1, [sp, #8]
+; BE-NEXT:    ldrh w9, [sp, #8]
+; BE-NEXT:    strb w8, [x0, #2]
+; BE-NEXT:    strh w9, [x0]
+; BE-NEXT:    add sp, sp, #16
+; BE-NEXT:    ret
+  %l = load <3 x i8>, ptr %src, align 1
+  %e = zext <3 x i8> %l to <3 x i32>
+  %add = add <3 x i32> %e, <i32 1, i32 2, i32 3>
+  %t = trunc <3 x i32> %add to <3 x i8>
+  store <3 x i8> %t, ptr %src
+  ret void
+}
+
+define void @load_v3i8_sext_to_3xi32_add_trunc_store(ptr %src) {
+; CHECK-LABEL: load_v3i8_sext_to_3xi32_add_trunc_store:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldrh w9, [x0]
+; CHECK-NEXT:  Lloh6:
+; CHECK-NEXT:    adrp x8, lCPI23_0@PAGE
+; CHECK-NEXT:  Lloh7:
+; CHECK-NEXT:    ldr q1, [x8, lCPI23_0@PAGEOFF]
+; CHECK-NEXT:    add x8, x0, #1
+; CHECK-NEXT:    strh w9, [sp, #12]
+; CHECK-NEXT:    add x9, x0, #2
+; CHECK-NEXT:    ldr s0, [sp, #12]
+; CHECK-NEXT:    ushll.8h v0, v0, #0
+; CHECK-NEXT:    ld1.b { v0 }[4], [x9]
+; CHECK-NEXT:    uaddw.4s v0, v1, v0
+; CHECK-NEXT:    st1.b { v0 }[4], [x8]
+; CHECK-NEXT:    st1.b { v0 }[8], [x9]
+; CHECK-NEXT:    st1.b { v0 }[0], [x0]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+; CHECK-NEXT:    .loh AdrpLdr Lloh6, Lloh7
+;
+; BE-LABEL: load_v3i8_sext_to_3xi32_add_trunc_store:
+; BE:       // %bb.0:
+; BE-NEXT:    sub sp, sp, #16
+; BE-NEXT:    .cfi_def_cfa_offset 16
+; BE-NEXT:    ldrh w9, [x0]
+; BE-NEXT:    adrp x8, .LCPI23_0
+; BE-NEXT:    add x8, x8, :lo12:.LCPI23_0
+; BE-NEXT:    ld1 { v1.4h }, [x8]
+; BE-NEXT:    strh w9, [sp, #12]
+; BE-NEXT:    add x9, x0, #2
+; BE-NEXT:    ldr s0, [sp, #12]
+; BE-NEXT:    rev32 v0.8b, v0.8b
+; BE-NEXT:    ushll v0.8h, v0.8b, #0
+; BE-NEXT:    ld1 { v0.b }[4], [x9]
+; BE-NEXT:    add v0.4h, v0.4h, v1.4h
+; BE-NEXT:    xtn v1.8b, v0.8h
+; BE-NEXT:    umov w8, v0.h[2]
+; BE-NEXT:    rev32 v1.16b, v1.16b
+; BE-NEXT:    str s1, [sp, #8]
+; BE-NEXT:    ldrh w9, [sp, #8]
+; BE-NEXT:    strb w8, [x0, #2]
+; BE-NEXT:    strh w9, [x0]
+; BE-NEXT:    add sp, sp, #16
+; BE-NEXT:    ret
+  %l = load <3 x i8>, ptr %src, align 1
+  %e = sext <3 x i8> %l to <3 x i32>
+  %add = add <3 x i32> %e, <i32 1, i32 2, i32 3>
+  %t = trunc <3 x i32> %add to <3 x i8>
+  store <3 x i8> %t, ptr %src
+  ret void
+}


        


More information about the llvm-commits mailing list