<html>
    <head>
      <base href="https://bugs.llvm.org/">
    </head>
    <body><table border="1" cellspacing="0" cellpadding="8">
        <tr>
          <th>Bug ID</th>
          <td><a class="bz_bug_link 
          bz_status_NEW "
   title="NEW - [AARCH64] Clang should use xtn/shrn for shuffles"
   href="https://bugs.llvm.org/show_bug.cgi?id=43495">43495</a>
          </td>
        </tr>

        <tr>
          <th>Summary</th>
          <td>[AARCH64] Clang should use xtn/shrn for shuffles
          </td>
        </tr>

        <tr>
          <th>Product</th>
          <td>libraries
          </td>
        </tr>

        <tr>
          <th>Version</th>
          <td>9.0
          </td>
        </tr>

        <tr>
          <th>Hardware</th>
          <td>Other
          </td>
        </tr>

        <tr>
          <th>OS</th>
          <td>Linux
          </td>
        </tr>

        <tr>
          <th>Status</th>
          <td>NEW
          </td>
        </tr>

        <tr>
          <th>Severity</th>
          <td>enhancement
          </td>
        </tr>

        <tr>
          <th>Priority</th>
          <td>P
          </td>
        </tr>

        <tr>
          <th>Component</th>
          <td>Backend: AArch64
          </td>
        </tr>

        <tr>
          <th>Assignee</th>
          <td>unassignedbugs@nondot.org
          </td>
        </tr>

        <tr>
          <th>Reporter</th>
          <td>husseydevin@gmail.com
          </td>
        </tr>

        <tr>
          <th>CC</th>
          <td>arnaud.degrandmaison@arm.com, llvm-bugs@lists.llvm.org, peter.smith@linaro.org, Ties.Stuij@arm.com
          </td>
        </tr></table>
      <p>
        <div>
        <pre>On aarch64, shuffles suck.

Not only do you need to split up the instructions and use ext more, but they
are so much slower than on ARMv7a.

#include &lt;arm_neon.h&gt;

/* lanes 0 and 2 of x */
uint32x2_t get_even_lanes(uint32x4_t x)
{
    return __builtin_shufflevector(x, x, 0, 2);
}
/* lanes 1 and 3 of x */
uint32x2_t get_odd_lanes(uint32x4_t x)
{
    return __builtin_shufflevector(x, x, 1, 3);
}
/* interleaves the low and high halves of x: yields {x0, x2} and {x1, x3} */
uint32x2x2_t vzip_pairwise(uint32x4_t x)
{
    return vzip_u32(vget_low_u32(x), vget_high_u32(x));
}


Clang-9 emits this:

get_even_lanes:
        ext     v1.16b, v0.16b, v0.16b, #8
        zip1    v0.2s, v0.2s, v1.2s
        ret
get_odd_lanes:
        ext     v1.16b, v0.16b, v0.16b, #8
        zip1    v0.2s, v0.2s, v1.2s
        ret
vzip_pairwise:
        ext     v1.16b, v0.16b, v0.16b, #8
        zip1    v2.2s, v0.2s, v1.2s
        zip2    v1.2s, v0.2s, v1.2s
        mov     v0.16b, v2.16b
        ret

This is garbage.

It would be significantly better to emit this instead:

get_even_lanes:
        xtn     v0.2s, v0.2d
        ret
get_odd_lanes:
        shrn    v0.2s, v0.2d, #32
        ret
vzip_pairwise:
        shrn    v1.2s, v0.2d, #32
        xtn     v0.2s, v0.2d
        ret
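
For reference, a source-level rewrite along these lines should coax the same
xtn/shrn out of the compiler today (the *_narrow names are mine; this assumes
little-endian lane numbering, where the even 32-bit lanes are the low halves
of the 64-bit lanes):

#include &lt;arm_neon.h&gt;

uint32x2_t get_even_lanes_narrow(uint32x4_t x)
{
    /* low 32 bits of each 64-bit lane == even lanes -> xtn v0.2s, v0.2d */
    return vmovn_u64(vreinterpretq_u64_u32(x));
}
uint32x2_t get_odd_lanes_narrow(uint32x4_t x)
{
    /* high 32 bits of each 64-bit lane == odd lanes -> shrn v0.2s, v0.2d, #32 */
    return vshrn_n_u64(vreinterpretq_u64_u32(x), 32);
}
uint32x2x2_t vzip_pairwise_narrow(uint32x4_t x)
{
    uint64x2_t xx = vreinterpretq_u64_u32(x);
    uint32x2x2_t r;
    r.val[0] = vmovn_u64(xx);        /* xtn  */
    r.val[1] = vshrn_n_u64(xx, 32);  /* shrn */
    return r;
}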

Side note: on 32-bit, if we can kill the source, vshrn+vmovn and vrevNq+vuzpq
(using only one of the vuzpq results) both save the in-place vzip, and either
is the best 0,2,1,3 shuffle available short of vld2.
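
Hedged sketch of my reading of that side note (function names are mine; for
32-bit lanes the relevant rev is vrev64q, and this again assumes little-endian
lane numbering):

#include &lt;arm_neon.h&gt;

/* {a0,a1,a2,a3} -> {a0,a2,a1,a3} via narrowing: the even lanes land in the
   low d register, the odd lanes in the high one, with no vzip needed. */
uint32x4_t shuffle_0213_narrow(uint32x4_t x)
{
    uint64x2_t xx = vreinterpretq_u64_u32(x);
    return vcombine_u32(vmovn_u64(xx),          /* vmovn.i64 -> {a0, a2} */
                        vshrn_n_u64(xx, 32));   /* vshrn.i64 -> {a1, a3} */
}

/* Same shuffle via vrev64q + vuzpq, keeping only the first vuzpq result. */
uint32x4_t shuffle_0213_uzp(uint32x4_t x)
{
    return vuzpq_u32(x, vrev64q_u32(x)).val[0]; /* {a0, a2, a1, a3} */
}</pre>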
        </div>
      </p>


    </body>
</html>