[llvm] [AArch64][GlobalISel] Refactor BITCAST Legalization (PR #80505)

Matt Arsenault via llvm-commits <llvm-commits at lists.llvm.org>
Mon Feb 5 03:37:57 PST 2024


================
@@ -15,13 +33,552 @@ define <4 x i16> @foo1(<2 x i32> %a) {
 }
 
 define <4 x i16> @foo2(<2 x i32> %a) {
-; CHECK-LABEL: foo2:
-; CHECK:       movi	v0.2d, #0000000000000000
-; CHECK-NEXT:  ret
-
+; CHECK-SD-LABEL: foo2:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: foo2:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #712 // =0x2c8
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    mov v1.s[1], w8
+; CHECK-GI-NEXT:    zip1 v0.2s, v1.2s, v0.2s
+; CHECK-GI-NEXT:    rev32 v0.4h, v0.4h
+; CHECK-GI-NEXT:    ret
   %1 = shufflevector <2 x i32> <i32 712, i32 undef>, <2 x i32> %a, <2 x i32> <i32 0, i32 2>
 ; Can't optimize the following bitcast to scalar_to_vector.
   %2 = bitcast <2 x i32> %1 to <4 x i16>
   %3 = shufflevector <4 x i16> %2, <4 x i16> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
   ret <4 x i16> %3
 }
+
+; ===== To and From Scalar Types =====
+
+define i32 @bitcast_v4i8_i32(<4 x i8> %a, <4 x i8> %b){
+; CHECK-LABEL: bitcast_v4i8_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    add v0.4h, v0.4h, v1.4h
+; CHECK-NEXT:    xtn v0.8b, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+    %c = add <4 x i8> %a, %b
+    %d = bitcast <4 x i8> %c to i32
+    ret i32 %d
+}
+
+define <4 x i8> @bitcast_i32_v4i8(i32 %a, i32 %b){
+; CHECK-LABEL: bitcast_i32_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add w8, w0, w1
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    zip1 v0.8b, v0.8b, v0.8b
+; CHECK-NEXT:    ret
+    %c = add i32 %a, %b
+    %d = bitcast i32 %c to <4 x i8>
+    ret <4 x i8> %d
+}
+
+define i32 @bitcast_v2i16_i32(<2 x i16> %a, <2 x i16> %b){
+; CHECK-LABEL: bitcast_v2i16_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    add v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    strh w9, [sp, #12]
+; CHECK-NEXT:    strh w8, [sp, #14]
+; CHECK-NEXT:    ldr w0, [sp, #12]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+    %c = add <2 x i16> %a, %b
+    %d = bitcast <2 x i16> %c to i32
+    ret i32 %d
+}
+
+define <2 x i16> @bitcast_i32_v2i16(i32 %a, i32 %b){
+; CHECK-LABEL: bitcast_i32_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add w8, w0, w1
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+    %c = add i32 %a, %b
+    %d = bitcast i32 %c to <2 x i16>
+    ret <2 x i16> %d
+}
+
+define i64 @bitcast_v8i8_i64(<8 x i8> %a, <8 x i8> %b){
+; CHECK-LABEL: bitcast_v8i8_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+    %c = add <8 x i8> %a, %b
+    %d = bitcast <8 x i8> %c to i64
+    ret i64 %d
+}
+
+define <8 x i8> @bitcast_i64_v8i8(i64 %a, i64 %b){
+; CHECK-LABEL: bitcast_i64_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add x8, x0, x1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+    %c = add i64 %a, %b
+    %d = bitcast i64 %c to <8 x i8>
+    ret <8 x i8> %d
+}
+
+define i64 @bitcast_v4i16_i64(<4 x i16> %a, <4 x i16> %b){
+; CHECK-LABEL: bitcast_v4i16_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.4h, v0.4h, v1.4h
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+    %c = add <4 x i16> %a, %b
+    %d = bitcast <4 x i16> %c to i64
+    ret i64 %d
+}
+
+define <4 x i16> @bitcast_i64_v4i16(i64 %a, i64 %b){
+; CHECK-LABEL: bitcast_i64_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add x8, x0, x1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+    %c = add i64 %a, %b
+    %d = bitcast i64 %c to <4 x i16>
+    ret <4 x i16> %d
+}
+
+define i64 @bitcast_v2i32_i64(<2 x i32> %a, <2 x i32> %b){
+; CHECK-LABEL: bitcast_v2i32_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+    %c = add <2 x i32> %a, %b
+    %d = bitcast <2 x i32> %c to i64
+    ret i64 %d
+}
+
+define <2 x i32> @bitcast_i64_v2i32(i64 %a, i64 %b){
+; CHECK-LABEL: bitcast_i64_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add x8, x0, x1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    ret
+    %c = add i64 %a, %b
+    %d = bitcast i64 %c to <2 x i32>
+    ret <2 x i32> %d
+}
+
+; ===== Legal Vector Types =====
+
+define <4 x i16> @bitcast_v2i32_v4i16(<2 x i32> %a, <2 x i32> %b){
+; CHECK-LABEL: bitcast_v2i32_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    ret
+    %c = add <2 x i32> %a, %b
+    %d = bitcast <2 x i32> %c to <4 x i16>
+    ret <4 x i16> %d
+}
+
+define <4 x i32> @bitcast_v2i64_v4i32(<2 x i64> %a, <2 x i64> %b){
+; CHECK-LABEL: bitcast_v2i64_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    ret
+    %c = add <2 x i64> %a, %b
+    %d = bitcast <2 x i64> %c to <4 x i32>
+    ret <4 x i32> %d
+}
+
+define <8 x i8> @bitcast_v2i32_v8i8(<2 x i32> %a, <2 x i32> %b){
+; CHECK-LABEL: bitcast_v2i32_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    ret
+    %c = add <2 x i32> %a, %b
+    %d = bitcast <2 x i32> %c to <8 x i8>
+    ret <8 x i8> %d
+}
+
+define <8 x i16> @bitcast_v2i64_v8i16(<2 x i64> %a, <2 x i64> %b){
+; CHECK-LABEL: bitcast_v2i64_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    ret
+    %c = add <2 x i64> %a, %b
+    %d = bitcast <2 x i64> %c to <8 x i16>
+    ret <8 x i16> %d
+}
+
+define <16 x i8> @bitcast_v2i64_v16i8(<2 x i64> %a, <2 x i64> %b){
+; CHECK-LABEL: bitcast_v2i64_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-NEXT:    ret
+    %c = add <2 x i64> %a, %b
+    %d = bitcast <2 x i64> %c to <16 x i8>
+    ret <16 x i8> %d
+}
+
+define <2 x i32> @bitcast_v4i16_v2i32(<4 x i16> %a, <4 x i16> %b){
+; CHECK-LABEL: bitcast_v4i16_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.4h, v0.4h, v1.4h
+; CHECK-NEXT:    ret
+    %c = add <4 x i16> %a, %b
+    %d = bitcast <4 x i16> %c to <2 x i32>
+    ret <2 x i32> %d
+}
+
+define <2 x i64> @bitcast_v4i32_v2i64(<4 x i32> %a, <4 x i32> %b){
+; CHECK-LABEL: bitcast_v4i32_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+    %c = add <4 x i32> %a, %b
+    %d = bitcast <4 x i32> %c to <2 x i64>
+    ret <2 x i64> %d
+}
+
+define <8 x i8> @bitcast_v4i16_v8i8(<4 x i16> %a, <4 x i16> %b){
+; CHECK-LABEL: bitcast_v4i16_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.4h, v0.4h, v1.4h
+; CHECK-NEXT:    ret
+    %c = add <4 x i16> %a, %b
+    %d = bitcast <4 x i16> %c to <8 x i8>
+    ret <8 x i8> %d
+}
+
+define <8 x i16> @bitcast_v4i32_v8i16(<4 x i32> %a, <4 x i32> %b){
+; CHECK-LABEL: bitcast_v4i32_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+    %c = add <4 x i32> %a, %b
+    %d = bitcast <4 x i32> %c to <8 x i16>
+    ret <8 x i16> %d
+}
+
+define <16 x i8> @bitcast_v4i32_v16i8(<4 x i32> %a, <4 x i32> %b){
+; CHECK-LABEL: bitcast_v4i32_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+    %c = add <4 x i32> %a, %b
+    %d = bitcast <4 x i32> %c to <16 x i8>
+    ret <16 x i8> %d
+}
+
+define <2 x i32> @bitcast_v8i8_v2i32(<8 x i8> %a, <8 x i8> %b){
+; CHECK-LABEL: bitcast_v8i8_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    ret
+    %c = add <8 x i8> %a, %b
+    %d = bitcast <8 x i8> %c to <2 x i32>
+    ret <2 x i32> %d
+}
+
+define <2 x i64> @bitcast_v8i16_v2i64(<8 x i16> %a, <8 x i16> %b){
+; CHECK-LABEL: bitcast_v8i16_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
+; CHECK-NEXT:    ret
+    %c = add <8 x i16> %a, %b
+    %d = bitcast <8 x i16> %c to <2 x i64>
+    ret <2 x i64> %d
+}
+
+define <4 x i16> @bitcast_v8i8_v4i16(<8 x i8> %a, <8 x i8> %b){
+; CHECK-LABEL: bitcast_v8i8_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.8b, v0.8b, v1.8b
+; CHECK-NEXT:    ret
+    %c = add <8 x i8> %a, %b
+    %d = bitcast <8 x i8> %c to <4 x i16>
+    ret <4 x i16> %d
+}
+
+define <4 x i32> @bitcast_v8i16_v4i32(<8 x i16> %a, <8 x i16> %b){
+; CHECK-LABEL: bitcast_v8i16_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
+; CHECK-NEXT:    ret
+    %c = add <8 x i16> %a, %b
+    %d = bitcast <8 x i16> %c to <4 x i32>
+    ret <4 x i32> %d
+}
+
+define <16 x i8> @bitcast_v8i16_v16i8(<8 x i16> %a, <8 x i16> %b){
+; CHECK-LABEL: bitcast_v8i16_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.8h, v0.8h, v1.8h
+; CHECK-NEXT:    ret
+    %c = add <8 x i16> %a, %b
+    %d = bitcast <8 x i16> %c to <16 x i8>
+    ret <16 x i8> %d
+}
+
+define <2 x i64> @bitcast_v16i8_v2i64(<16 x i8> %a, <16 x i8> %b){
+; CHECK-LABEL: bitcast_v16i8_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ret
+    %c = add <16 x i8> %a, %b
+    %d = bitcast <16 x i8> %c to <2 x i64>
+    ret <2 x i64> %d
+}
+
+define <4 x i32> @bitcast_v16i8_v4i32(<16 x i8> %a, <16 x i8> %b){
+; CHECK-LABEL: bitcast_v16i8_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ret
+    %c = add <16 x i8> %a, %b
+    %d = bitcast <16 x i8> %c to <4 x i32>
+    ret <4 x i32> %d
+}
+
+define <8 x i16> @bitcast_v16i8_v8i16(<16 x i8> %a, <16 x i8> %b){
+; CHECK-LABEL: bitcast_v16i8_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    add v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ret
+    %c = add <16 x i8> %a, %b
+    %d = bitcast <16 x i8> %c to <8 x i16>
+    ret <8 x i16> %d
+}
+
+; ===== Smaller/Larger Width Vectors with Legal Element Sizes =====
+
+define <4 x i8> @bitcast_v2i16_v4i8(<2 x i16> %a, <2 x i16> %b){
+; CHECK-LABEL: bitcast_v2i16_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    add v0.2s, v0.2s, v1.2s
+; CHECK-NEXT:    mov w8, v0.s[1]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    strh w9, [sp, #12]
+; CHECK-NEXT:    strh w8, [sp, #14]
+; CHECK-NEXT:    ldr s0, [sp, #12]
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+    %c = add <2 x i16> %a, %b
+    %d = bitcast <2 x i16> %c to <4 x i8>
+    ret <4 x i8> %d
+}
+
+define <2 x i16> @bitcast_v4i8_v2i16(<4 x i8> %a, <4 x i8> %b){
+; CHECK-LABEL: bitcast_v4i8_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    add v0.4h, v0.4h, v1.4h
+; CHECK-NEXT:    add x8, sp, #12
+; CHECK-NEXT:    xtn v0.8b, v0.8h
+; CHECK-NEXT:    str s0, [sp, #12]
+; CHECK-NEXT:    ld1 { v0.h }[0], [x8]
+; CHECK-NEXT:    orr x8, x8, #0x2
+; CHECK-NEXT:    ld1 { v0.h }[2], [x8]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+    %c = add <4 x i8> %a, %b
+    %d = bitcast <4 x i8> %c to <2 x i16>
+    ret <2 x i16> %d
+}
+
+define <8 x i32> @bitcast_v4i64_v8i32(<4 x i64> %a, <4 x i64> %b){
+; CHECK-SD-LABEL: bitcast_v4i64_v8i32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    add v1.2d, v1.2d, v3.2d
+; CHECK-SD-NEXT:    add v0.2d, v0.2d, v2.2d
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: bitcast_v4i64_v8i32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    add v0.2d, v0.2d, v2.2d
+; CHECK-GI-NEXT:    add v1.2d, v1.2d, v3.2d
+; CHECK-GI-NEXT:    ret
+    %c = add <4 x i64> %a, %b
+    %d = bitcast <4 x i64> %c to <8 x i32>
+    ret <8 x i32> %d
+}
+
+define <16 x i16> @bitcast_v4i64_v16i16(<4 x i64> %a, <4 x i64> %b){
+; CHECK-SD-LABEL: bitcast_v4i64_v16i16:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    add v1.2d, v1.2d, v3.2d
+; CHECK-SD-NEXT:    add v0.2d, v0.2d, v2.2d
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: bitcast_v4i64_v16i16:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    add v0.2d, v0.2d, v2.2d
+; CHECK-GI-NEXT:    add v1.2d, v1.2d, v3.2d
+; CHECK-GI-NEXT:    ret
+    %c = add <4 x i64> %a, %b
+    %d = bitcast <4 x i64> %c to <16 x i16>
+    ret <16 x i16> %d
----------------
arsenm wrote:

IR is usually indented with 2 spaces, not 4.
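
For example, bitcast_v4i8_i32 from the diff above would read as follows with 2-space indentation (a formatting-only sketch; the IR itself is unchanged):

  define i32 @bitcast_v4i8_i32(<4 x i8> %a, <4 x i8> %b){
    %c = add <4 x i8> %a, %b
    %d = bitcast <4 x i8> %c to i32
    ret i32 %d
  }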

https://github.com/llvm/llvm-project/pull/80505

