<html>
    <head>
      <base href="https://bugs.llvm.org/">
    </head>
    <body><table border="1" cellspacing="0" cellpadding="8">
        <tr>
          <th>Bug ID</th>
          <td><a class="bz_bug_link bz_status_NEW" title="NEW - [X86][SSE] Improve general memcmp support" href="https://bugs.llvm.org/show_bug.cgi?id=33329">33329</a>
          </td>
        </tr>

        <tr>
          <th>Summary</th>
          <td>[X86][SSE] Improve general memcmp support
          </td>
        </tr>

        <tr>
          <th>Product</th>
          <td>libraries
          </td>
        </tr>

        <tr>
          <th>Version</th>
          <td>trunk
          </td>
        </tr>

        <tr>
          <th>Hardware</th>
          <td>PC
          </td>
        </tr>

        <tr>
          <th>OS</th>
          <td>Windows NT
          </td>
        </tr>

        <tr>
          <th>Status</th>
          <td>NEW
          </td>
        </tr>

        <tr>
          <th>Severity</th>
          <td>enhancement
          </td>
        </tr>

        <tr>
          <th>Priority</th>
          <td>P
          </td>
        </tr>

        <tr>
          <th>Component</th>
          <td>Backend: X86
          </td>
        </tr>

        <tr>
          <th>Assignee</th>
          <td>unassignedbugs@nondot.org
          </td>
        </tr>

        <tr>
          <th>Reporter</th>
          <td>llvm-dev@redking.me.uk
          </td>
        </tr>

        <tr>
          <th>CC</th>
          <td>andrea.dibiagio@gmail.com, filcab@gmail.com, llvm-bugs@lists.llvm.org, spatel+llvm@rotateright.com
          </td>
        </tr></table>
      <p>
        <div>
        <pre>Currently memcmp lowers to a libcall:

int cmp16(const char *a, const char *b) {
        return __builtin_memcmp(a, b, 16);
}

cmp16(char const*, char const*):
        movl    $16, %edx
        jmp     memcmp

Depending on the library implementation and call/shim overhead, this can be
quite slow. A possible alternative would be to vectorize it, using a more
complex version of what we do for 'equality' memcmp (<a class="bz_bug_link bz_status_NEW" title="NEW - [X86][SSE] Improve equality memcmp support" href="show_bug.cgi?id=33325">Bug #33325</a>):

#include <x86intrin.h>
int cmp16(const char *a, const char *b) {
  __m128i va = _mm_loadu_si128((const __m128i*)a);
  __m128i vb = _mm_loadu_si128((const __m128i*)b);
  unsigned eq = _mm_movemask_epi8(_mm_cmpeq_epi8(va, vb));

  // Flip the sign bits so signed byte compares give the unsigned ordering
  // that memcmp requires.
  __m128i kSign = _mm_set1_epi8((char)0x80);
  va = _mm_xor_si128(va, kSign);
  vb = _mm_xor_si128(vb, kSign);
  unsigned gt = _mm_movemask_epi8(_mm_cmpgt_epi8(va, vb));
  unsigned lt = _mm_movemask_epi8(_mm_cmplt_epi8(va, vb));

  // The lower tzcnt marks the first differing byte; the difference of the
  // two counts is positive if a > b and negative if a < b.
  unsigned cgt = __tzcnt_u32(gt);
  unsigned clt = __tzcnt_u32(lt);
  return ((eq == 0xFFFF) ? 0 : (int)(clt - cgt));
}

clang -g0 -O3 -march=btver2

cmp16(char const*, char const*):
        vmovdqu (%rdi), %xmm0
        vmovdqu (%rsi), %xmm1
        vpcmpeqb        %xmm1, %xmm0, %xmm2
        vpmovmskb       %xmm2, %ecx
        vmovdqa .LCPI0_0(%rip), %xmm2   # xmm2 = [9259542123273814144,9259542123273814144]
        vpxor   %xmm2, %xmm0, %xmm0
        vpxor   %xmm2, %xmm1, %xmm1
        vpcmpgtb        %xmm1, %xmm0, %xmm2
        vpcmpgtb        %xmm0, %xmm1, %xmm0
        vpmovmskb       %xmm2, %eax
        vpmovmskb       %xmm0, %edx
        tzcntl  %eax, %eax
        tzcntl  %edx, %edx
        subl    %eax, %edx
        xorl    %eax, %eax
        cmpl    $65535, %ecx            # imm = 0xFFFF
        cmovnel %edx, %eax
        retq</pre>
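        <pre>As a quick sanity check (a hypothetical harness, not part of the proposed
lowering), the intrinsics version of cmp16 above can be verified to agree in
sign with the libc memcmp on random buffers; it assumes the cmp16 definition
from this report is compiled into the same translation unit:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int cmp16(const char *a, const char *b);   /* definition above */

static int sign(int x) { return (x > 0) - (x < 0); }

int main(void) {
  char a[16], b[16];
  for (int iter = 0; iter < 1000000; ++iter) {
    for (int i = 0; i < 16; ++i) {
      a[i] = (char)rand();
      /* bias towards mostly-equal buffers so later bytes get exercised */
      b[i] = (rand() & 3) ? a[i] : (char)rand();
    }
    if (sign(cmp16(a, b)) != sign(memcmp(a, b, 16))) {
      printf("mismatch\n");
      return 1;
    }
  }
  printf("ok\n");
  return 0;
}</pre>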
        </div>
      </p>


    </body>
</html>