[llvm] [AArch64] isTBLMask(M, VT) as part of the shuffle mask check (PR #81748)

via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 14 07:51:24 PST 2024


llvmbot wrote:



@llvm/pr-subscribers-backend-aarch64

Author: AtariDreams (AtariDreams)

Changes



---
Full diff: https://github.com/llvm/llvm-project/pull/81748.diff


1 File Affected:

- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+14-4) 


``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 8c5a4cdae11634..cc5d974c4312a8 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -11498,6 +11498,8 @@ static bool isWideDUPMask(ArrayRef<int> M, EVT VT, unsigned BlockSize,
 // vector sources of the shuffle are different.
 static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
                       unsigned &Imm) {
+  ReverseEXT = false;
+
   // Look for the first non-undef element.
   const int *FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
 
@@ -11535,6 +11537,14 @@ static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
   return true;
 }
 
+static bool isTBLMask(ArrayRef<int> M, EVT VT) {
+  // We can handle <16 x i8> and <8 x i8> vector shuffles. If the index in the
+  // mask is out of range, then 0 is placed into the resulting vector. So pretty
+  // much any mask of 16 or 8 elements can work here.
+  return (VT == MVT::v8i8 && M.size() == 8) ||
+         (VT == MVT::v16i8 && M.size() == 16);
+}
+
 /// isREVMask - Check if a vector shuffle corresponds to a REV
 /// instruction with the specified blocksize.  (The order of the elements
 /// within each block of the vector is reversed.)
@@ -11569,7 +11579,7 @@ static bool isZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
     return false;
   WhichResult = (M[0] == 0 ? 0 : 1);
   unsigned Idx = WhichResult * NumElts / 2;
-  for (unsigned i = 0; i != NumElts; i += 2) {
+  for (unsigned i = 0; i < NumElts; i += 2) {
     if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
         (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts))
       return false;
@@ -12284,7 +12294,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                        DAG.getConstant(8, dl, MVT::i32));
   }
 
-  bool ReverseEXT = false;
+  bool ReverseEXT;
   unsigned Imm;
   if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) {
     if (ReverseEXT)
@@ -13815,7 +13825,7 @@ bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
   return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) || isREVMask(M, VT, 64) ||
           isREVMask(M, VT, 32) || isREVMask(M, VT, 16) ||
           isEXTMask(M, VT, DummyBool, DummyUnsigned) ||
-          // isTBLMask(M, VT) || // FIXME: Port TBL support from ARM.
+          (isTBLMask(M, VT) && VT.getScalarSizeInBits() < 32) ||
           isTRNMask(M, VT, DummyUnsigned) || isUZPMask(M, VT, DummyUnsigned) ||
           isZIPMask(M, VT, DummyUnsigned) ||
           isTRN_v_undef_Mask(M, VT, DummyUnsigned) ||
@@ -26888,7 +26898,7 @@ SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
     return convertFromScalableVector(DAG, VT, Op);
   }
 
-  bool ReverseEXT = false;
+  bool ReverseEXT;
   unsigned Imm;
   if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm) &&
       Imm == VT.getVectorNumElements() - 1) {

``````````
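For context on why isTBLMask can accept essentially any 8- or 16-element mask: the AArch64 TBL instruction writes 0 into any result lane whose index is out of range for the table, so no per-element validation of the mask is required. Below is a minimal standalone sketch (not part of the patch; `tblLookup` is a hypothetical helper) modeling that lookup behavior for an 8-byte table:

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

// Hypothetical helper: emulate a single-register 8-byte TBL lookup.
// Each mask element selects a source byte; anything out of range
// (including an "undef" lane modeled here as -1) produces 0.
static std::array<std::uint8_t, 8>
tblLookup(const std::array<std::uint8_t, 8> &Src,
          const std::array<int, 8> &Mask) {
  std::array<std::uint8_t, 8> Result{};
  for (std::size_t I = 0; I != Mask.size(); ++I)
    Result[I] =
        (Mask[I] >= 0 && Mask[I] < (int)Src.size()) ? Src[Mask[I]] : 0;
  return Result;
}

int main() {
  std::array<std::uint8_t, 8> Src = {10, 20, 30, 40, 50, 60, 70, 80};
  // The out-of-range index 42 and the undef lane (-1) both yield 0,
  // which is why any v8i8/v16i8 shuffle mask is representable with TBL.
  std::array<int, 8> Mask = {7, 6, 42, 0, 1, -1, 3, 2};
  for (std::uint8_t B : tblLookup(Src, Mask))
    std::printf("%u ", B);
  std::printf("\n"); // prints: 80 70 0 10 20 0 40 30
  return 0;
}
```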



https://github.com/llvm/llvm-project/pull/81748

