[llvm] 5cf75ec - [AArch64][SME]: Generate streaming-compatible code for int/fp select/vselect

Hassnaa Hamdi via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 24 10:19:24 PST 2022


Author: Hassnaa Hamdi
Date: 2022-11-24T18:19:13Z
New Revision: 5cf75ecf04ceceab6c8434f5cec5652915d6b419

URL: https://github.com/llvm/llvm-project/commit/5cf75ecf04ceceab6c8434f5cec5652915d6b419
DIFF: https://github.com/llvm/llvm-project/commit/5cf75ecf04ceceab6c8434f5cec5652915d6b419.diff

LOG: [AArch64][SME]: Generate streaming-compatible code for int/fp select/vselect

To generate code compatible to streaming mode:
 - enable custom lowering for VSETCC, needed for (fp-vselect.ll, int-vselect.ll).

Differential Revision: https://reviews.llvm.org/D138519

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 067d2d9f2250..c29df3563ee0 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -12901,7 +12901,8 @@ SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
   if (Op.getValueType().isScalableVector())
     return LowerToPredicatedOp(Op, DAG, AArch64ISD::SETCC_MERGE_ZERO);
 
-  if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType()))
+  if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType(),
+                                   Subtarget->forceStreamingCompatibleSVE()))
     return LowerFixedLengthVectorSetccToSVE(Op, DAG);
 
   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
@@ -22867,7 +22868,7 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorSetccToSVE(
   EVT InVT = Op.getOperand(0).getValueType();
   EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
 
-  assert(useSVEForFixedLengthVectorVT(InVT) &&
+  assert(InVT.isFixedLengthVector() && isTypeLegal(InVT) &&
          "Only expected to lower fixed length vector operation!");
   assert(Op.getValueType() == InVT.changeTypeToInteger() &&
          "Expected integer result of the same bit length as the inputs!");

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
index c29f02a14fde..38351d1e45d0 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-vselect.ll
@@ -87,18 +87,21 @@ define void @select_v16f16(ptr %a, ptr %b) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x1]
 ; CHECK-NEXT:    adrp x8, .LCPI3_0
+; CHECK-NEXT:    ptrue p0.h, vl8
 ; CHECK-NEXT:    ldp q3, q2, [x0]
-; CHECK-NEXT:    fcmeq v5.8h, v3.8h, v0.8h
-; CHECK-NEXT:    fcmeq v4.8h, v2.8h, v1.8h
-; CHECK-NEXT:    ldr q6, [x8, :lo12:.LCPI3_0]
-; CHECK-NEXT:    and z3.d, z3.d, z5.d
-; CHECK-NEXT:    and z2.d, z2.d, z4.d
-; CHECK-NEXT:    eor z4.d, z4.d, z6.d
-; CHECK-NEXT:    eor z6.d, z5.d, z6.d
-; CHECK-NEXT:    and z1.d, z1.d, z4.d
-; CHECK-NEXT:    and z0.d, z0.d, z6.d
-; CHECK-NEXT:    orr z1.d, z2.d, z1.d
+; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT:    fcmeq p1.h, p0/z, z2.h, z1.h
+; CHECK-NEXT:    fcmeq p0.h, p0/z, z3.h, z0.h
+; CHECK-NEXT:    mov z5.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z6.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    and z2.d, z2.d, z5.d
+; CHECK-NEXT:    eor z5.d, z5.d, z4.d
+; CHECK-NEXT:    eor z4.d, z6.d, z4.d
+; CHECK-NEXT:    and z3.d, z3.d, z6.d
+; CHECK-NEXT:    and z0.d, z0.d, z4.d
+; CHECK-NEXT:    and z1.d, z1.d, z5.d
 ; CHECK-NEXT:    orr z0.d, z3.d, z0.d
+; CHECK-NEXT:    orr z1.d, z2.d, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x half>, ptr %a
@@ -161,18 +164,21 @@ define void @select_v8f32(ptr %a, ptr %b) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x1]
 ; CHECK-NEXT:    adrp x8, .LCPI6_0
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ldp q3, q2, [x0]
-; CHECK-NEXT:    fcmeq v5.4s, v3.4s, v0.4s
-; CHECK-NEXT:    fcmeq v4.4s, v2.4s, v1.4s
-; CHECK-NEXT:    ldr q6, [x8, :lo12:.LCPI6_0]
-; CHECK-NEXT:    and z3.d, z3.d, z5.d
-; CHECK-NEXT:    and z2.d, z2.d, z4.d
-; CHECK-NEXT:    eor z4.d, z4.d, z6.d
-; CHECK-NEXT:    eor z6.d, z5.d, z6.d
-; CHECK-NEXT:    and z1.d, z1.d, z4.d
-; CHECK-NEXT:    and z0.d, z0.d, z6.d
-; CHECK-NEXT:    orr z1.d, z2.d, z1.d
+; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI6_0]
+; CHECK-NEXT:    fcmeq p1.s, p0/z, z2.s, z1.s
+; CHECK-NEXT:    fcmeq p0.s, p0/z, z3.s, z0.s
+; CHECK-NEXT:    mov z5.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z6.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    and z2.d, z2.d, z5.d
+; CHECK-NEXT:    eor z5.d, z5.d, z4.d
+; CHECK-NEXT:    eor z4.d, z6.d, z4.d
+; CHECK-NEXT:    and z3.d, z3.d, z6.d
+; CHECK-NEXT:    and z0.d, z0.d, z4.d
+; CHECK-NEXT:    and z1.d, z1.d, z5.d
 ; CHECK-NEXT:    orr z0.d, z3.d, z0.d
+; CHECK-NEXT:    orr z1.d, z2.d, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x float>, ptr %a
@@ -232,18 +238,21 @@ define void @select_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldp q0, q1, [x1]
 ; CHECK-NEXT:    adrp x8, .LCPI9_0
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ldp q3, q2, [x0]
-; CHECK-NEXT:    fcmeq v5.2d, v3.2d, v0.2d
-; CHECK-NEXT:    fcmeq v4.2d, v2.2d, v1.2d
-; CHECK-NEXT:    ldr q6, [x8, :lo12:.LCPI9_0]
-; CHECK-NEXT:    and z3.d, z3.d, z5.d
-; CHECK-NEXT:    and z2.d, z2.d, z4.d
-; CHECK-NEXT:    eor z4.d, z4.d, z6.d
-; CHECK-NEXT:    eor z6.d, z5.d, z6.d
-; CHECK-NEXT:    and z1.d, z1.d, z4.d
-; CHECK-NEXT:    and z0.d, z0.d, z6.d
-; CHECK-NEXT:    orr z1.d, z2.d, z1.d
+; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI9_0]
+; CHECK-NEXT:    fcmeq p1.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    fcmeq p0.d, p0/z, z3.d, z0.d
+; CHECK-NEXT:    mov z5.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z6.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    and z2.d, z2.d, z5.d
+; CHECK-NEXT:    eor z5.d, z5.d, z4.d
+; CHECK-NEXT:    eor z4.d, z6.d, z4.d
+; CHECK-NEXT:    and z3.d, z3.d, z6.d
+; CHECK-NEXT:    and z0.d, z0.d, z4.d
+; CHECK-NEXT:    and z1.d, z1.d, z5.d
 ; CHECK-NEXT:    orr z0.d, z3.d, z0.d
+; CHECK-NEXT:    orr z1.d, z2.d, z1.d
 ; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x double>, ptr %a

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll
index 33fa29f3b7f6..5d6fd5339503 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-vselect.ll
@@ -75,21 +75,24 @@ define <16 x i8> @select_v16i8(<16 x i8> %op1, <16 x i8> %op2, <16 x i1> %mask)
 define void @select_v32i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: select_v32i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q1, q0, [x1]
 ; CHECK-NEXT:    adrp x8, .LCPI3_0
+; CHECK-NEXT:    ptrue p0.b, vl16
 ; CHECK-NEXT:    ldp q3, q2, [x0]
-; CHECK-NEXT:    cmeq v6.16b, v3.16b, v0.16b
 ; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI3_0]
-; CHECK-NEXT:    and z3.d, z3.d, z6.d
-; CHECK-NEXT:    cmeq v5.16b, v2.16b, v1.16b
+; CHECK-NEXT:    cmpeq p1.b, p0/z, z2.b, z0.b
+; CHECK-NEXT:    cmpeq p0.b, p0/z, z3.b, z1.b
+; CHECK-NEXT:    mov z5.b, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z6.b, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    and z2.d, z2.d, z5.d
 ; CHECK-NEXT:    eor z5.d, z5.d, z4.d
 ; CHECK-NEXT:    eor z4.d, z6.d, z4.d
-; CHECK-NEXT:    and z1.d, z1.d, z5.d
-; CHECK-NEXT:    and z0.d, z0.d, z4.d
-; CHECK-NEXT:    orr z1.d, z2.d, z1.d
-; CHECK-NEXT:    orr z0.d, z3.d, z0.d
-; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    and z3.d, z3.d, z6.d
+; CHECK-NEXT:    and z1.d, z1.d, z4.d
+; CHECK-NEXT:    and z0.d, z0.d, z5.d
+; CHECK-NEXT:    orr z1.d, z3.d, z1.d
+; CHECK-NEXT:    orr z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <32 x i8>, ptr %a
   %op2 = load <32 x i8>, ptr %b
@@ -172,21 +175,24 @@ define <8 x i16> @select_v8i16(<8 x i16> %op1, <8 x i16> %op2, <8 x i1> %mask) #
 define void @select_v16i16(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: select_v16i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q1, q0, [x1]
 ; CHECK-NEXT:    adrp x8, .LCPI7_0
+; CHECK-NEXT:    ptrue p0.h, vl8
 ; CHECK-NEXT:    ldp q3, q2, [x0]
-; CHECK-NEXT:    cmeq v6.8h, v3.8h, v0.8h
 ; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI7_0]
-; CHECK-NEXT:    and z3.d, z3.d, z6.d
-; CHECK-NEXT:    cmeq v5.8h, v2.8h, v1.8h
+; CHECK-NEXT:    cmpeq p1.h, p0/z, z2.h, z0.h
+; CHECK-NEXT:    cmpeq p0.h, p0/z, z3.h, z1.h
+; CHECK-NEXT:    mov z5.h, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z6.h, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    and z2.d, z2.d, z5.d
 ; CHECK-NEXT:    eor z5.d, z5.d, z4.d
 ; CHECK-NEXT:    eor z4.d, z6.d, z4.d
-; CHECK-NEXT:    and z1.d, z1.d, z5.d
-; CHECK-NEXT:    and z0.d, z0.d, z4.d
-; CHECK-NEXT:    orr z1.d, z2.d, z1.d
-; CHECK-NEXT:    orr z0.d, z3.d, z0.d
-; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    and z3.d, z3.d, z6.d
+; CHECK-NEXT:    and z1.d, z1.d, z4.d
+; CHECK-NEXT:    and z0.d, z0.d, z5.d
+; CHECK-NEXT:    orr z1.d, z3.d, z1.d
+; CHECK-NEXT:    orr z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x i16>, ptr %a
   %op2 = load <16 x i16>, ptr %b
@@ -246,21 +252,24 @@ define <4 x i32> @select_v4i32(<4 x i32> %op1, <4 x i32> %op2, <4 x i1> %mask) #
 define void @select_v8i32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: select_v8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q1, q0, [x1]
 ; CHECK-NEXT:    adrp x8, .LCPI10_0
+; CHECK-NEXT:    ptrue p0.s, vl4
 ; CHECK-NEXT:    ldp q3, q2, [x0]
-; CHECK-NEXT:    cmeq v6.4s, v3.4s, v0.4s
 ; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI10_0]
-; CHECK-NEXT:    and z3.d, z3.d, z6.d
-; CHECK-NEXT:    cmeq v5.4s, v2.4s, v1.4s
+; CHECK-NEXT:    cmpeq p1.s, p0/z, z2.s, z0.s
+; CHECK-NEXT:    cmpeq p0.s, p0/z, z3.s, z1.s
+; CHECK-NEXT:    mov z5.s, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z6.s, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    and z2.d, z2.d, z5.d
 ; CHECK-NEXT:    eor z5.d, z5.d, z4.d
 ; CHECK-NEXT:    eor z4.d, z6.d, z4.d
-; CHECK-NEXT:    and z1.d, z1.d, z5.d
-; CHECK-NEXT:    and z0.d, z0.d, z4.d
-; CHECK-NEXT:    orr z1.d, z2.d, z1.d
-; CHECK-NEXT:    orr z0.d, z3.d, z0.d
-; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    and z3.d, z3.d, z6.d
+; CHECK-NEXT:    and z1.d, z1.d, z4.d
+; CHECK-NEXT:    and z0.d, z0.d, z5.d
+; CHECK-NEXT:    orr z1.d, z3.d, z1.d
+; CHECK-NEXT:    orr z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x i32>, ptr %a
   %op2 = load <8 x i32>, ptr %b
@@ -317,21 +326,24 @@ define <2 x i64> @select_v2i64(<2 x i64> %op1, <2 x i64> %op2, <2 x i1> %mask) #
 define void @select_v4i64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: select_v4i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q1, q0, [x1]
 ; CHECK-NEXT:    adrp x8, .LCPI13_0
+; CHECK-NEXT:    ptrue p0.d, vl2
 ; CHECK-NEXT:    ldp q3, q2, [x0]
-; CHECK-NEXT:    cmeq v6.2d, v3.2d, v0.2d
 ; CHECK-NEXT:    ldr q4, [x8, :lo12:.LCPI13_0]
-; CHECK-NEXT:    and z3.d, z3.d, z6.d
-; CHECK-NEXT:    cmeq v5.2d, v2.2d, v1.2d
+; CHECK-NEXT:    cmpeq p1.d, p0/z, z2.d, z0.d
+; CHECK-NEXT:    cmpeq p0.d, p0/z, z3.d, z1.d
+; CHECK-NEXT:    mov z5.d, p1/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    mov z6.d, p0/z, #-1 // =0xffffffffffffffff
 ; CHECK-NEXT:    and z2.d, z2.d, z5.d
 ; CHECK-NEXT:    eor z5.d, z5.d, z4.d
 ; CHECK-NEXT:    eor z4.d, z6.d, z4.d
-; CHECK-NEXT:    and z1.d, z1.d, z5.d
-; CHECK-NEXT:    and z0.d, z0.d, z4.d
-; CHECK-NEXT:    orr z1.d, z2.d, z1.d
-; CHECK-NEXT:    orr z0.d, z3.d, z0.d
-; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    and z3.d, z3.d, z6.d
+; CHECK-NEXT:    and z1.d, z1.d, z4.d
+; CHECK-NEXT:    and z0.d, z0.d, z5.d
+; CHECK-NEXT:    orr z1.d, z3.d, z1.d
+; CHECK-NEXT:    orr z0.d, z2.d, z0.d
+; CHECK-NEXT:    stp q1, q0, [x0]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x i64>, ptr %a
   %op2 = load <4 x i64>, ptr %b


        


More information about the llvm-commits mailing list