[llvm] 2c85461 - [AArch64][SVE] Implement lowering for SIGN_EXTEND etc. of SVE predicates.

Eli Friedman via llvm-commits <llvm-commits at lists.llvm.org>
Wed May 6 17:57:01 PDT 2020


Author: Eli Friedman
Date: 2020-05-06T17:56:32-07:00
New Revision: 2c8546107a91a7b9a31791452712676937df54fe

URL: https://github.com/llvm/llvm-project/commit/2c8546107a91a7b9a31791452712676937df54fe
DIFF: https://github.com/llvm/llvm-project/commit/2c8546107a91a7b9a31791452712676937df54fe.diff

LOG: [AArch64][SVE] Implement lowering for SIGN_EXTEND etc. of SVE predicates.

This is now done using ISel patterns, since there's a single-instruction
lowering. (We could convert to VSELECT and pattern-match that instead,
but there doesn't seem to be much point.)
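
For example (mirroring the new sve-sext-zext.ll test added below), a
sign-extend of an SVE predicate now selects to a single predicated mov
of an immediate:

  define <vscale x 4 x i32> @sext_i1_i32(<vscale x 4 x i1> %a) {
    %r = sext <vscale x 4 x i1> %a to <vscale x 4 x i32>
    ret <vscale x 4 x i32> %r
  }
  ; with -mattr=+sve this now compiles to:
  ;   mov z0.s, p0/z, #-1
  ;   ret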

I think this might be the first instruction to use nested multiclasses
this way; it seems like a good way to reduce the duplication between the
different integer widths. Let me know if it seems like an improvement.

Also, while I'm here, fix the result type of SETCC for scalable
vectors: it now returns a vector of i1 (a predicate type), so we don't
try to merge a sign-extend into the SETCC.
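
With the predicate result type in place, a compare feeding a
sign-extend lowers to a predicate-producing fcmeq followed by the
predicated mov above, as the new cases added to sve-fcmp.ll below
check:

  define <vscale x 4 x i32> @oeq_4f32_sext(<vscale x 4 x float> %x, <vscale x 4 x float> %x2) {
    %y = fcmp oeq <vscale x 4 x float> %x, %x2
    %r = sext <vscale x 4 x i1> %y to <vscale x 4 x i32>
    ret <vscale x 4 x i32> %r
  }
  ; now compiles to:
  ;   ptrue p0.s
  ;   fcmeq p0.s, p0/z, z0.s, z1.s
  ;   mov   z0.s, p0/z, #-1
  ;   ret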

Differential Revision: https://reviews.llvm.org/D79193

Added: 
    llvm/test/CodeGen/AArch64/sve-sext-zext.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/SVEInstrFormats.td
    llvm/test/CodeGen/AArch64/sve-fcmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d5e549cdee56..fa38ac618fc2 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -991,10 +991,12 @@ void AArch64TargetLowering::addQRTypeForNEON(MVT VT) {
   addTypeForNEON(VT, MVT::v4i32);
 }
 
-EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
-                                              EVT VT) const {
+EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &,
+                                              LLVMContext &C, EVT VT) const {
   if (!VT.isVector())
     return MVT::i32;
+  if (VT.isScalableVector())
+    return EVT::getVectorVT(C, MVT::i1, VT.getVectorElementCount());
   return VT.changeVectorElementTypeToInteger();
 }
 

diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 52d3c166fd50..b7e86d287331 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -4077,20 +4077,25 @@ multiclass sve_int_dup_imm_pred_merge<string asm> {
                   (!cast<Instruction>(NAME # _D) ZPR64:$Zd, PPRAny:$Pg, 0, 0), 0>;
 }
 
-multiclass sve_int_dup_imm_pred_zero<string asm> {
-  def _B : sve_int_dup_imm_pred<0b00, 0, asm, ZPR8,  "/z", (ins PPRAny:$Pg, cpy_imm8_opt_lsl_i8:$imm)>;
-  def _H : sve_int_dup_imm_pred<0b01, 0, asm, ZPR16, "/z", (ins PPRAny:$Pg, cpy_imm8_opt_lsl_i16:$imm)>;
-  def _S : sve_int_dup_imm_pred<0b10, 0, asm, ZPR32, "/z", (ins PPRAny:$Pg, cpy_imm8_opt_lsl_i32:$imm)>;
-  def _D : sve_int_dup_imm_pred<0b11, 0, asm, ZPR64, "/z", (ins PPRAny:$Pg, cpy_imm8_opt_lsl_i64:$imm)>;
-
-  def : InstAlias<"mov $Zd, $Pg/z, $imm",
-                  (!cast<Instruction>(NAME # _B) ZPR8:$Zd,  PPRAny:$Pg, cpy_imm8_opt_lsl_i8:$imm), 1>;
-  def : InstAlias<"mov $Zd, $Pg/z, $imm",
-                  (!cast<Instruction>(NAME # _H) ZPR16:$Zd, PPRAny:$Pg, cpy_imm8_opt_lsl_i16:$imm), 1>;
+multiclass sve_int_dup_imm_pred_zero_inst<
+    bits<2> sz8_64, string asm, ZPRRegOp zprty, ValueType intty,
+    ValueType predty, imm8_opt_lsl cpyimm> {
+  def NAME : sve_int_dup_imm_pred<sz8_64, 0, asm, zprty, "/z", (ins PPRAny:$Pg, cpyimm:$imm)>;
   def : InstAlias<"mov $Zd, $Pg/z, $imm",
-                  (!cast<Instruction>(NAME # _S) ZPR32:$Zd, PPRAny:$Pg, cpy_imm8_opt_lsl_i32:$imm), 1>;
-  def : InstAlias<"mov $Zd, $Pg/z, $imm",
-                  (!cast<Instruction>(NAME # _D) ZPR64:$Zd, PPRAny:$Pg, cpy_imm8_opt_lsl_i64:$imm), 1>;
+                  (!cast<Instruction>(NAME) zprty:$Zd, PPRAny:$Pg, cpyimm:$imm), 1>;
+  def : Pat<(intty (zext (predty PPRAny:$Ps1))),
+            (!cast<Instruction>(NAME) PPRAny:$Ps1, 1, 0)>;
+  def : Pat<(intty (sext (predty PPRAny:$Ps1))),
+            (!cast<Instruction>(NAME) PPRAny:$Ps1, -1, 0)>;
+  def : Pat<(intty (anyext (predty PPRAny:$Ps1))),
+            (!cast<Instruction>(NAME) PPRAny:$Ps1, 1, 0)>;
+}
+
+multiclass sve_int_dup_imm_pred_zero<string asm> {
+  defm _B : sve_int_dup_imm_pred_zero_inst<0b00, asm, ZPR8,  nxv16i8, nxv16i1, cpy_imm8_opt_lsl_i8>;
+  defm _H : sve_int_dup_imm_pred_zero_inst<0b01, asm, ZPR16, nxv8i16, nxv8i1,  cpy_imm8_opt_lsl_i16>;
+  defm _S : sve_int_dup_imm_pred_zero_inst<0b10, asm, ZPR32, nxv4i32, nxv4i1,  cpy_imm8_opt_lsl_i32>;
+  defm _D : sve_int_dup_imm_pred_zero_inst<0b11, asm, ZPR64, nxv2i64, nxv2i1,  cpy_imm8_opt_lsl_i64>;
 }
 
 //===----------------------------------------------------------------------===//

diff --git a/llvm/test/CodeGen/AArch64/sve-fcmp.ll b/llvm/test/CodeGen/AArch64/sve-fcmp.ll
index f1426bb3179d..cbafae608262 100644
--- a/llvm/test/CodeGen/AArch64/sve-fcmp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fcmp.ll
@@ -229,3 +229,27 @@ define <vscale x 8 x i1> @ueq_8f16(<vscale x 8 x half> %x, <vscale x 8 x half> %
   %y = fcmp ueq <vscale x 8 x half> %x, %x2
   ret <vscale x 8 x i1> %y
 }
+
+define <vscale x 4 x i32> @oeq_4f32_sext(<vscale x 4 x float> %x, <vscale x 4 x float> %x2) {
+; CHECK-LABEL: oeq_4f32_sext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    fcmeq p0.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ret
+  %y = fcmp oeq <vscale x 4 x float> %x, %x2
+  %r = sext <vscale x 4 x i1> %y to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 4 x i32> @oeq_4f32_zext(<vscale x 4 x float> %x, <vscale x 4 x float> %x2) {
+; CHECK-LABEL: oeq_4f32_zext:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    fcmeq p0.s, p0/z, z0.s, z1.s
+; CHECK-NEXT:    mov z0.s, p0/z, #1 // =0x1
+; CHECK-NEXT:    ret
+  %y = fcmp oeq <vscale x 4 x float> %x, %x2
+  %r = zext <vscale x 4 x i1> %y to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}

diff --git a/llvm/test/CodeGen/AArch64/sve-sext-zext.ll b/llvm/test/CodeGen/AArch64/sve-sext-zext.ll
new file mode 100644
index 000000000000..f9a527c1fc8c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-sext-zext.ll
@@ -0,0 +1,188 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s | FileCheck %s
+
+define <vscale x 16 x i8> @sext_i1_i8(<vscale x 16 x i1> %a) {
+; CHECK-LABEL: sext_i1_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.b, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 16 x i1> %a to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %r
+}
+
+define <vscale x 8 x i16> @sext_i1_i16(<vscale x 8 x i1> %a) {
+; CHECK-LABEL: sext_i1_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.h, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %a to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 4 x i32> @sext_i1_i32(<vscale x 4 x i1> %a) {
+; CHECK-LABEL: sext_i1_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.s, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %a to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 2 x i64> @sext_i1_i64(<vscale x 2 x i1> %a) {
+; CHECK-LABEL: sext_i1_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.d, p0/z, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %a to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 16 x i8> @zext_i1_i8(<vscale x 16 x i1> %a) {
+; CHECK-LABEL: zext_i1_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.b, p0/z, #1 // =0x1
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 16 x i1> %a to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %r
+}
+
+define <vscale x 8 x i16> @zext_i1_i16(<vscale x 8 x i1> %a) {
+; CHECK-LABEL: zext_i1_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.h, p0/z, #1 // =0x1
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %a to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 4 x i32> @zext_i1_i32(<vscale x 4 x i1> %a) {
+; CHECK-LABEL: zext_i1_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.s, p0/z, #1 // =0x1
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %a to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 2 x i64> @zext_i1_i64(<vscale x 2 x i1> %a) {
+; CHECK-LABEL: zext_i1_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.d, p0/z, #1 // =0x1
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %a to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 8 x i16> @sext_i8_i16(<vscale x 8 x i8> %a) {
+; CHECK-LABEL: sext_i8_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 4 x i32> @sext_i8_i32(<vscale x 4 x i8> %a) {
+; CHECK-LABEL: sext_i8_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i8> %a to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 2 x i64> @sext_i8_i64(<vscale x 2 x i8> %a) {
+; CHECK-LABEL: sext_i8_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    sxtb z0.d, p0/m, z0.d
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i8> %a to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 8 x i16> @zext_i8_i16(<vscale x 8 x i8> %a) {
+; CHECK-LABEL: zext_i8_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and z0.h, z0.h, #0xff
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 4 x i32> @zext_i8_i32(<vscale x 4 x i8> %a) {
+; CHECK-LABEL: zext_i8_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and z0.s, z0.s, #0xff
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 2 x i64> @zext_i8_i64(<vscale x 2 x i8> %a) {
+; CHECK-LABEL: zext_i8_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and z0.d, z0.d, #0xff
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i8> %a to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 4 x i32> @sext_i16_i32(<vscale x 4 x i16> %a) {
+; CHECK-LABEL: sext_i16_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 2 x i64> @sext_i16_i64(<vscale x 2 x i16> %a) {
+; CHECK-LABEL: sext_i16_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    sxth z0.d, p0/m, z0.d
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i16> %a to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 4 x i32> @zext_i16_i32(<vscale x 4 x i16> %a) {
+; CHECK-LABEL: zext_i16_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and z0.s, z0.s, #0xffff
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 2 x i64> @zext_i16_i64(<vscale x 2 x i16> %a) {
+; CHECK-LABEL: zext_i16_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and z0.d, z0.d, #0xffff
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i16> %a to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 2 x i64> @sext_i32_i64(<vscale x 2 x i32> %a) {
+; CHECK-LABEL: sext_i32_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 2 x i64> @zext_i32_i64(<vscale x 2 x i32> %a) {
+; CHECK-LABEL: zext_i32_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and z0.d, z0.d, #0xffffffff
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}



