[llvm] 9eb6989 - [AArch64][SME]: Make 'Expand' the default action for all Ops.

Hassnaa Hamdi via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 6 07:32:20 PST 2023


Author: Hassnaa Hamdi
Date: 2023-01-06T15:32:07Z
New Revision: 9eb698946dc1df13d413174f7e04d7194d7e9502

URL: https://github.com/llvm/llvm-project/commit/9eb698946dc1df13d413174f7e04d7194d7e9502
DIFF: https://github.com/llvm/llvm-project/commit/9eb698946dc1df13d413174f7e04d7194d7e9502.diff

LOG: [AArch64][SME]: Make 'Expand' the default action for all Ops.

By default, expand all operations, then change individual operations to Custom or Legal where needed.
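
In short, a condensed sketch of the AArch64ISelLowering.cpp hunk below (not the
full operation list; it assumes the usual AArch64TargetLowering member context,
where setOperationAction and the LegalizeAction values are in scope):

    void AArch64TargetLowering::addTypeForStreamingSVE(MVT VT) {
      // Default every generic operation on this type to Expand, so anything
      // without dedicated handling falls back to generic expansion.
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
        setOperationAction(Op, VT, Expand);

      // Then opt back in: natively supported operations become Legal...
      setOperationAction(ISD::LOAD, VT, Legal);
      setOperationAction(ISD::STORE, VT, Legal);

      // ...and operations with dedicated lowering code become Custom.
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::SINT_TO_FP, VT, Custom);
      // (see the patch below for the complete Legal/Custom lists)
    }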

Reviewed By: sdesmalen

Differential Revision: https://reviews.llvm.org/D141068

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index b417ce002655..e5e314cc02a0 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1661,6 +1661,40 @@ bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
 }
 
 void AArch64TargetLowering::addTypeForStreamingSVE(MVT VT) {
+  // By default set all operations to Expand,
+  // then change to Legal/Custom if needed.
+  for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
+    setOperationAction(Op, VT, Expand);
+
+  // STORE, LOAD, SCALAR_TO_VECTOR and BITCAST are natively supported,
+  // so no need to Custom/Expand them.
+  setOperationAction(ISD::STORE, VT, Legal);
+  setOperationAction(ISD::LOAD, VT, Legal);
+  setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
+  setOperationAction(ISD::BITCAST, VT, Legal);
+
+  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
+  setOperationAction(ISD::MLOAD, VT, Custom);
+  setOperationAction(ISD::MSTORE, VT, Custom);
+  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
+  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+  setOperationAction(ISD::SDIV, VT, Custom);
+  setOperationAction(ISD::SHL, VT, Custom);
+  setOperationAction(ISD::SRA, VT, Custom);
+  setOperationAction(ISD::SRL, VT, Custom);
+  setOperationAction(ISD::OR, VT, Custom);
+  setOperationAction(ISD::SETCC, VT, Custom);
+  setOperationAction(ISD::UDIV, VT, Custom);
+  setOperationAction(ISD::SINT_TO_FP, VT, Custom);
+  setOperationAction(ISD::FP_TO_SINT, VT, Custom);
+  setOperationAction(ISD::FP_TO_UINT, VT, Custom);
+  setOperationAction(ISD::UINT_TO_FP, VT, Custom);
+  setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
+  setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
+  setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
+  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
+  setOperationAction(ISD::FCOPYSIGN, VT, Custom);
   setOperationAction(ISD::ANY_EXTEND, VT, Custom);
   setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
   setOperationAction(ISD::SIGN_EXTEND, VT, Custom);

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll
index aac9373952b3..06e2da875a98 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll
@@ -61,15 +61,15 @@ define void @select_v16f16(ptr %a, ptr %b, i1 %mask) #0 {
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    csetm w8, ne
 ; CHECK-NEXT:    ldr q1, [x0, #16]
-; CHECK-NEXT:    ldr q2, [x1]
-; CHECK-NEXT:    ldr q3, [x1, #16]
+; CHECK-NEXT:    ldr q2, [x1, #16]
+; CHECK-NEXT:    ldr q3, [x1]
 ; CHECK-NEXT:    mov z4.h, w8
 ; CHECK-NEXT:    bic z2.d, z2.d, z4.d
 ; CHECK-NEXT:    and z0.d, z0.d, z4.d
 ; CHECK-NEXT:    bic z3.d, z3.d, z4.d
 ; CHECK-NEXT:    and z1.d, z1.d, z4.d
-; CHECK-NEXT:    orr z0.d, z0.d, z2.d
-; CHECK-NEXT:    orr z1.d, z1.d, z3.d
+; CHECK-NEXT:    orr z0.d, z0.d, z3.d
+; CHECK-NEXT:    orr z1.d, z1.d, z2.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load volatile <16 x half>, ptr %a
@@ -121,21 +121,21 @@ define void @select_v8f32(ptr %a, ptr %b, i1 %mask) #0 {
 ; CHECK-LABEL: select_v8f32:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    tst w2, #0x1
-; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q0, [x0, #16]
 ; CHECK-NEXT:    csetm w8, ne
-; CHECK-NEXT:    ldr q1, [x0, #16]
+; CHECK-NEXT:    ldr q1, [x0]
 ; CHECK-NEXT:    mvn w9, w8
-; CHECK-NEXT:    ldr q2, [x1]
-; CHECK-NEXT:    ldr q3, [x1, #16]
+; CHECK-NEXT:    ldr q2, [x1, #16]
+; CHECK-NEXT:    ldr q3, [x1]
 ; CHECK-NEXT:    mov z4.s, w8
 ; CHECK-NEXT:    mov z5.s, w9
-; CHECK-NEXT:    and z1.d, z1.d, z4.d
 ; CHECK-NEXT:    and z0.d, z0.d, z4.d
-; CHECK-NEXT:    and z2.d, z2.d, z5.d
+; CHECK-NEXT:    and z1.d, z1.d, z4.d
 ; CHECK-NEXT:    and z3.d, z3.d, z5.d
-; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    and z2.d, z2.d, z5.d
 ; CHECK-NEXT:    orr z1.d, z1.d, z3.d
-; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load volatile <8 x float>, ptr %a
   %op2 = load volatile <8 x float>, ptr %b
@@ -186,21 +186,21 @@ define void @select_v4f64(ptr %a, ptr %b, i1 %mask) #0 {
 ; CHECK-LABEL: select_v4f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    tst w2, #0x1
-; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ldr q0, [x0, #16]
 ; CHECK-NEXT:    csetm x8, ne
-; CHECK-NEXT:    ldr q1, [x0, #16]
+; CHECK-NEXT:    ldr q1, [x0]
 ; CHECK-NEXT:    mvn x9, x8
-; CHECK-NEXT:    ldr q2, [x1]
-; CHECK-NEXT:    ldr q3, [x1, #16]
+; CHECK-NEXT:    ldr q2, [x1, #16]
+; CHECK-NEXT:    ldr q3, [x1]
 ; CHECK-NEXT:    mov z4.d, x8
 ; CHECK-NEXT:    mov z5.d, x9
-; CHECK-NEXT:    and z1.d, z1.d, z4.d
 ; CHECK-NEXT:    and z0.d, z0.d, z4.d
-; CHECK-NEXT:    and z2.d, z2.d, z5.d
+; CHECK-NEXT:    and z1.d, z1.d, z4.d
 ; CHECK-NEXT:    and z3.d, z3.d, z5.d
-; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    and z2.d, z2.d, z5.d
 ; CHECK-NEXT:    orr z1.d, z1.d, z3.d
-; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load volatile <4 x double>, ptr %a
   %op2 = load volatile <4 x double>, ptr %b

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
index 4477139436ec..10f8c1b655ce 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-to-int.ll
@@ -718,8 +718,8 @@ define <1 x i64> @fcvtzu_v1f64_v1i64(<1 x double> %op1) #0 {
 ; CHECK-LABEL: fcvtzu_v1f64_v1i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT:    fcvtzu x8, d0
-; CHECK-NEXT:    mov z0.d, x8
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    fcvtzu z0.d, p0/m, z0.d
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fptoui <1 x double> %op1 to <1 x i64>
@@ -1471,8 +1471,8 @@ define <1 x i64> @fcvtzs_v1f64_v1i64(<1 x double> %op1) #0 {
 ; CHECK-LABEL: fcvtzs_v1f64_v1i64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
-; CHECK-NEXT:    fcvtzs x8, d0
-; CHECK-NEXT:    mov z0.d, x8
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    fcvtzs z0.d, p0/m, z0.d
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %res = fptosi <1 x double> %op1 to <1 x i64>

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
index 0830df07745a..bb4d70eaad21 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
@@ -70,20 +70,20 @@ define <8 x half> @select_v8f16(<8 x half> %op1, <8 x half> %op2, <8 x i1> %mask
 define void @select_v16f16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: select_v16f16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ldp q1, q0, [x0]
 ; CHECK-NEXT:    ptrue p0.h, vl8
 ; CHECK-NEXT:    ldp q3, q2, [x1]
-; CHECK-NEXT:    fcmeq p1.h, p0/z, z1.h, z2.h
-; CHECK-NEXT:    fcmeq p0.h, p0/z, z0.h, z3.h
+; CHECK-NEXT:    fcmeq p1.h, p0/z, z0.h, z2.h
+; CHECK-NEXT:    fcmeq p0.h, p0/z, z1.h, z3.h
 ; CHECK-NEXT:    mov z4.h, p1/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    mov z5.h, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    bic z2.d, z2.d, z4.d
 ; CHECK-NEXT:    bic z3.d, z3.d, z5.d
-; CHECK-NEXT:    and z0.d, z0.d, z5.d
-; CHECK-NEXT:    and z1.d, z1.d, z4.d
-; CHECK-NEXT:    orr z0.d, z0.d, z3.d
-; CHECK-NEXT:    orr z1.d, z1.d, z2.d
-; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    and z1.d, z1.d, z5.d
+; CHECK-NEXT:    and z0.d, z0.d, z4.d
+; CHECK-NEXT:    orr z1.d, z1.d, z3.d
+; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x half>, ptr %a
   %op2 = load <16 x half>, ptr %b
@@ -133,20 +133,20 @@ define <4 x float> @select_v4f32(<4 x float> %op1, <4 x float> %op2, <4 x i1> %m
 define void @select_v8f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: select_v8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ldp q1, q0, [x0]
 ; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ldp q3, q2, [x1]
-; CHECK-NEXT:    fcmeq p1.s, p0/z, z1.s, z2.s
-; CHECK-NEXT:    fcmeq p0.s, p0/z, z0.s, z3.s
+; CHECK-NEXT:    fcmeq p1.s, p0/z, z0.s, z2.s
+; CHECK-NEXT:    fcmeq p0.s, p0/z, z1.s, z3.s
 ; CHECK-NEXT:    mov z4.s, p1/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    mov z5.s, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    bic z2.d, z2.d, z4.d
 ; CHECK-NEXT:    bic z3.d, z3.d, z5.d
-; CHECK-NEXT:    and z0.d, z0.d, z5.d
-; CHECK-NEXT:    and z1.d, z1.d, z4.d
-; CHECK-NEXT:    orr z0.d, z0.d, z3.d
-; CHECK-NEXT:    orr z1.d, z1.d, z2.d
-; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    and z1.d, z1.d, z5.d
+; CHECK-NEXT:    and z0.d, z0.d, z4.d
+; CHECK-NEXT:    orr z1.d, z1.d, z3.d
+; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x float>, ptr %a
   %op2 = load <8 x float>, ptr %b
@@ -197,20 +197,20 @@ define <2 x double> @select_v2f64(<2 x double> %op1, <2 x double> %op2, <2 x i1>
 define void @select_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: select_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ldp q1, q0, [x0]
 ; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ldp q3, q2, [x1]
-; CHECK-NEXT:    fcmeq p1.d, p0/z, z1.d, z2.d
-; CHECK-NEXT:    fcmeq p0.d, p0/z, z0.d, z3.d
+; CHECK-NEXT:    fcmeq p1.d, p0/z, z0.d, z2.d
+; CHECK-NEXT:    fcmeq p0.d, p0/z, z1.d, z3.d
 ; CHECK-NEXT:    mov z4.d, p1/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    mov z5.d, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    bic z2.d, z2.d, z4.d
 ; CHECK-NEXT:    bic z3.d, z3.d, z5.d
-; CHECK-NEXT:    and z0.d, z0.d, z5.d
-; CHECK-NEXT:    and z1.d, z1.d, z4.d
-; CHECK-NEXT:    orr z0.d, z0.d, z3.d
-; CHECK-NEXT:    orr z1.d, z1.d, z2.d
-; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    and z1.d, z1.d, z5.d
+; CHECK-NEXT:    and z0.d, z0.d, z4.d
+; CHECK-NEXT:    orr z1.d, z1.d, z3.d
+; CHECK-NEXT:    orr z0.d, z0.d, z2.d
+; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, ptr %a
   %op2 = load <4 x double>, ptr %b

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
index 820114fc30dc..28d24016e7d2 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-insert-vector-elt.ll
@@ -398,12 +398,12 @@ define <4 x double> @insertelement_v4f64(ptr %a) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x0]
 ; CHECK-NEXT:    mov w8, #1
-; CHECK-NEXT:    fmov d3, #5.00000000
-; CHECK-NEXT:    index z4.d, #0, #1
+; CHECK-NEXT:    fmov d4, #5.00000000
+; CHECK-NEXT:    index z2.d, #0, #1
 ; CHECK-NEXT:    ptrue p0.d
-; CHECK-NEXT:    mov z2.d, x8
-; CHECK-NEXT:    cmpeq p0.d, p0/z, z4.d, z2.d
-; CHECK-NEXT:    mov z1.d, p0/m, d3
+; CHECK-NEXT:    mov z3.d, x8
+; CHECK-NEXT:    cmpeq p0.d, p0/z, z2.d, z3.d
+; CHECK-NEXT:    mov z1.d, p0/m, d4
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 killed $z1
 ; CHECK-NEXT:    ret
     %op1 = load <4 x double>, ptr %a

