[llvm] r355436 - [NFC][CodeGen][X86][AArch64] Add tests for C++ std::midpoint() pattern (PR40965)

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 5 12:18:47 PST 2019


Author: lebedevri
Date: Tue Mar  5 12:18:47 2019
New Revision: 355436

URL: http://llvm.org/viewvc/llvm-project?rev=355436&view=rev
Log:
[NFC][CodeGen][X86][AArch64] Add tests for C++ std::midpoint() pattern (PR40965)

Tests are only for integers, not floating point or pointers.

The scalar 8-bit case uses a branch instead of CMOV on X86,
because there is no 8-bit CMOV.

Vector tests are included for consistency, since the pattern can be vectorized.

https://bugs.llvm.org/show_bug.cgi?id=40965
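
For reference, the tests below all exercise roughly the following branchless
midpoint pattern. This is a minimal C++ sketch of the select/sub/lshr/mul/add
chain in the IR (the name midpoint_pattern is illustrative; this is not the
actual libstdc++ implementation):

#include <cstdint>
#include <type_traits>

// Overflow-safe integer midpoint: a + sign * ((max - min) >> 1).
// Since lo <= hi, the unsigned difference cannot wrap, and halving it
// keeps the step within the range of T.
template <typename T>
T midpoint_pattern(T a, T b) {
  using U = std::make_unsigned_t<T>;
  T sign = a > b ? T(-1) : T(1);   // %t4: select between -1 and +1
  T lo   = a > b ? b : a;          // %t5: min(a1, a2)
  T hi   = a > b ? a : b;          // %t6: max(a1, a2)
  U half = U(U(hi) - U(lo)) >> 1;  // %t7/%t8: sub, then logical shift right
  return T(a + sign * T(half));    // %t9/%a10: multiply by sign, add back a1
}

For example, midpoint_pattern<int32_t>(INT32_MIN, INT32_MAX) yields -1 without
overflowing, which a naive (a + b) / 2 would not.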

Added:
    llvm/trunk/test/CodeGen/AArch64/midpoint-int.ll
    llvm/trunk/test/CodeGen/X86/midpoint-int-vec-128.ll
    llvm/trunk/test/CodeGen/X86/midpoint-int-vec-256.ll
    llvm/trunk/test/CodeGen/X86/midpoint-int-vec-512.ll
    llvm/trunk/test/CodeGen/X86/midpoint-int.ll

Added: llvm/trunk/test/CodeGen/AArch64/midpoint-int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/midpoint-int.ll?rev=355436&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/midpoint-int.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/midpoint-int.ll Tue Mar  5 12:18:47 2019
@@ -0,0 +1,535 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+
+; These test cases are inspired by C++2a std::midpoint().
+; See https://bugs.llvm.org/show_bug.cgi?id=40965
+
+; ---------------------------------------------------------------------------- ;
+; 32-bit width
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define i32 @scalar_i32_signed_reg_reg(i32 %a1, i32 %a2) nounwind {
+; CHECK-LABEL: scalar_i32_signed_reg_reg:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    csel w9, w1, w0, gt
+; CHECK-NEXT:    csel w10, w0, w1, gt
+; CHECK-NEXT:    mov w8, #-1
+; CHECK-NEXT:    sub w9, w10, w9
+; CHECK-NEXT:    cneg w8, w8, le
+; CHECK-NEXT:    lsr w9, w9, #1
+; CHECK-NEXT:    madd w0, w9, w8, w0
+; CHECK-NEXT:    ret
+  %t3 = icmp sgt i32 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i32 -1, i32 1
+  %t5 = select i1 %t3, i32 %a2, i32 %a1
+  %t6 = select i1 %t3, i32 %a1, i32 %a2
+  %t7 = sub i32 %t6, %t5
+  %t8 = lshr i32 %t7, 1
+  %t9 = mul nsw i32 %t8, %t4 ; signed
+  %a10 = add nsw i32 %t9, %a1 ; signed
+  ret i32 %a10
+}
+
+define i32 @scalar_i32_unsigned_reg_reg(i32 %a1, i32 %a2) nounwind {
+; CHECK-LABEL: scalar_i32_unsigned_reg_reg:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp w0, w1
+; CHECK-NEXT:    csel w9, w1, w0, hi
+; CHECK-NEXT:    csel w10, w0, w1, hi
+; CHECK-NEXT:    mov w8, #-1
+; CHECK-NEXT:    sub w9, w10, w9
+; CHECK-NEXT:    cneg w8, w8, ls
+; CHECK-NEXT:    lsr w9, w9, #1
+; CHECK-NEXT:    madd w0, w9, w8, w0
+; CHECK-NEXT:    ret
+  %t3 = icmp ugt i32 %a1, %a2
+  %t4 = select i1 %t3, i32 -1, i32 1
+  %t5 = select i1 %t3, i32 %a2, i32 %a1
+  %t6 = select i1 %t3, i32 %a1, i32 %a2
+  %t7 = sub i32 %t6, %t5
+  %t8 = lshr i32 %t7, 1
+  %t9 = mul i32 %t8, %t4
+  %a10 = add i32 %t9, %a1
+  ret i32 %a10
+}
+
+; Values are loaded. Only check signed case.
+
+define i32 @scalar_i32_signed_mem_reg(i32* %a1_addr, i32 %a2) nounwind {
+; CHECK-LABEL: scalar_i32_signed_mem_reg:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    cmp w8, w1
+; CHECK-NEXT:    csel w10, w1, w8, gt
+; CHECK-NEXT:    csel w11, w8, w1, gt
+; CHECK-NEXT:    sub w10, w11, w10
+; CHECK-NEXT:    cneg w9, w9, le
+; CHECK-NEXT:    lsr w10, w10, #1
+; CHECK-NEXT:    madd w0, w10, w9, w8
+; CHECK-NEXT:    ret
+  %a1 = load i32, i32* %a1_addr
+  %t3 = icmp sgt i32 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i32 -1, i32 1
+  %t5 = select i1 %t3, i32 %a2, i32 %a1
+  %t6 = select i1 %t3, i32 %a1, i32 %a2
+  %t7 = sub i32 %t6, %t5
+  %t8 = lshr i32 %t7, 1
+  %t9 = mul nsw i32 %t8, %t4 ; signed
+  %a10 = add nsw i32 %t9, %a1 ; signed
+  ret i32 %a10
+}
+
+define i32 @scalar_i32_signed_reg_mem(i32 %a1, i32* %a2_addr) nounwind {
+; CHECK-LABEL: scalar_i32_signed_reg_mem:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x1]
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    cmp w0, w8
+; CHECK-NEXT:    csel w10, w8, w0, gt
+; CHECK-NEXT:    csel w8, w0, w8, gt
+; CHECK-NEXT:    sub w8, w8, w10
+; CHECK-NEXT:    cneg w9, w9, le
+; CHECK-NEXT:    lsr w8, w8, #1
+; CHECK-NEXT:    madd w0, w8, w9, w0
+; CHECK-NEXT:    ret
+  %a2 = load i32, i32* %a2_addr
+  %t3 = icmp sgt i32 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i32 -1, i32 1
+  %t5 = select i1 %t3, i32 %a2, i32 %a1
+  %t6 = select i1 %t3, i32 %a1, i32 %a2
+  %t7 = sub i32 %t6, %t5
+  %t8 = lshr i32 %t7, 1
+  %t9 = mul nsw i32 %t8, %t4 ; signed
+  %a10 = add nsw i32 %t9, %a1 ; signed
+  ret i32 %a10
+}
+
+define i32 @scalar_i32_signed_mem_mem(i32* %a1_addr, i32* %a2_addr) nounwind {
+; CHECK-LABEL: scalar_i32_signed_mem_mem:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    ldr w9, [x1]
+; CHECK-NEXT:    mov w10, #-1
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    csel w11, w9, w8, gt
+; CHECK-NEXT:    csel w9, w8, w9, gt
+; CHECK-NEXT:    sub w9, w9, w11
+; CHECK-NEXT:    cneg w10, w10, le
+; CHECK-NEXT:    lsr w9, w9, #1
+; CHECK-NEXT:    madd w0, w9, w10, w8
+; CHECK-NEXT:    ret
+  %a1 = load i32, i32* %a1_addr
+  %a2 = load i32, i32* %a2_addr
+  %t3 = icmp sgt i32 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i32 -1, i32 1
+  %t5 = select i1 %t3, i32 %a2, i32 %a1
+  %t6 = select i1 %t3, i32 %a1, i32 %a2
+  %t7 = sub i32 %t6, %t5
+  %t8 = lshr i32 %t7, 1
+  %t9 = mul nsw i32 %t8, %t4 ; signed
+  %a10 = add nsw i32 %t9, %a1 ; signed
+  ret i32 %a10
+}
+
+; ---------------------------------------------------------------------------- ;
+; 64-bit width
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define i64 @scalar_i64_signed_reg_reg(i64 %a1, i64 %a2) nounwind {
+; CHECK-LABEL: scalar_i64_signed_reg_reg:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    csel x9, x1, x0, gt
+; CHECK-NEXT:    csel x10, x0, x1, gt
+; CHECK-NEXT:    mov x8, #-1
+; CHECK-NEXT:    sub x9, x10, x9
+; CHECK-NEXT:    cneg x8, x8, le
+; CHECK-NEXT:    lsr x9, x9, #1
+; CHECK-NEXT:    madd x0, x9, x8, x0
+; CHECK-NEXT:    ret
+  %t3 = icmp sgt i64 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i64 -1, i64 1
+  %t5 = select i1 %t3, i64 %a2, i64 %a1
+  %t6 = select i1 %t3, i64 %a1, i64 %a2
+  %t7 = sub i64 %t6, %t5
+  %t8 = lshr i64 %t7, 1
+  %t9 = mul nsw i64 %t8, %t4 ; signed
+  %a10 = add nsw i64 %t9, %a1 ; signed
+  ret i64 %a10
+}
+
+define i64 @scalar_i64_unsigned_reg_reg(i64 %a1, i64 %a2) nounwind {
+; CHECK-LABEL: scalar_i64_unsigned_reg_reg:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    cmp x0, x1
+; CHECK-NEXT:    csel x9, x1, x0, hi
+; CHECK-NEXT:    csel x10, x0, x1, hi
+; CHECK-NEXT:    mov x8, #-1
+; CHECK-NEXT:    sub x9, x10, x9
+; CHECK-NEXT:    cneg x8, x8, ls
+; CHECK-NEXT:    lsr x9, x9, #1
+; CHECK-NEXT:    madd x0, x9, x8, x0
+; CHECK-NEXT:    ret
+  %t3 = icmp ugt i64 %a1, %a2
+  %t4 = select i1 %t3, i64 -1, i64 1
+  %t5 = select i1 %t3, i64 %a2, i64 %a1
+  %t6 = select i1 %t3, i64 %a1, i64 %a2
+  %t7 = sub i64 %t6, %t5
+  %t8 = lshr i64 %t7, 1
+  %t9 = mul i64 %t8, %t4
+  %a10 = add i64 %t9, %a1
+  ret i64 %a10
+}
+
+; Values are loaded. Only check signed case.
+
+define i64 @scalar_i64_signed_mem_reg(i64* %a1_addr, i64 %a2) nounwind {
+; CHECK-LABEL: scalar_i64_signed_mem_reg:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    mov x9, #-1
+; CHECK-NEXT:    cmp x8, x1
+; CHECK-NEXT:    csel x10, x1, x8, gt
+; CHECK-NEXT:    csel x11, x8, x1, gt
+; CHECK-NEXT:    sub x10, x11, x10
+; CHECK-NEXT:    cneg x9, x9, le
+; CHECK-NEXT:    lsr x10, x10, #1
+; CHECK-NEXT:    madd x0, x10, x9, x8
+; CHECK-NEXT:    ret
+  %a1 = load i64, i64* %a1_addr
+  %t3 = icmp sgt i64 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i64 -1, i64 1
+  %t5 = select i1 %t3, i64 %a2, i64 %a1
+  %t6 = select i1 %t3, i64 %a1, i64 %a2
+  %t7 = sub i64 %t6, %t5
+  %t8 = lshr i64 %t7, 1
+  %t9 = mul nsw i64 %t8, %t4 ; signed
+  %a10 = add nsw i64 %t9, %a1 ; signed
+  ret i64 %a10
+}
+
+define i64 @scalar_i64_signed_reg_mem(i64 %a1, i64* %a2_addr) nounwind {
+; CHECK-LABEL: scalar_i64_signed_reg_mem:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x1]
+; CHECK-NEXT:    mov x9, #-1
+; CHECK-NEXT:    cmp x0, x8
+; CHECK-NEXT:    csel x10, x8, x0, gt
+; CHECK-NEXT:    csel x8, x0, x8, gt
+; CHECK-NEXT:    sub x8, x8, x10
+; CHECK-NEXT:    cneg x9, x9, le
+; CHECK-NEXT:    lsr x8, x8, #1
+; CHECK-NEXT:    madd x0, x8, x9, x0
+; CHECK-NEXT:    ret
+  %a2 = load i64, i64* %a2_addr
+  %t3 = icmp sgt i64 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i64 -1, i64 1
+  %t5 = select i1 %t3, i64 %a2, i64 %a1
+  %t6 = select i1 %t3, i64 %a1, i64 %a2
+  %t7 = sub i64 %t6, %t5
+  %t8 = lshr i64 %t7, 1
+  %t9 = mul nsw i64 %t8, %t4 ; signed
+  %a10 = add nsw i64 %t9, %a1 ; signed
+  ret i64 %a10
+}
+
+define i64 @scalar_i64_signed_mem_mem(i64* %a1_addr, i64* %a2_addr) nounwind {
+; CHECK-LABEL: scalar_i64_signed_mem_mem:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr x8, [x0]
+; CHECK-NEXT:    ldr x9, [x1]
+; CHECK-NEXT:    mov x10, #-1
+; CHECK-NEXT:    cmp x8, x9
+; CHECK-NEXT:    csel x11, x9, x8, gt
+; CHECK-NEXT:    csel x9, x8, x9, gt
+; CHECK-NEXT:    sub x9, x9, x11
+; CHECK-NEXT:    cneg x10, x10, le
+; CHECK-NEXT:    lsr x9, x9, #1
+; CHECK-NEXT:    madd x0, x9, x10, x8
+; CHECK-NEXT:    ret
+  %a1 = load i64, i64* %a1_addr
+  %a2 = load i64, i64* %a2_addr
+  %t3 = icmp sgt i64 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i64 -1, i64 1
+  %t5 = select i1 %t3, i64 %a2, i64 %a1
+  %t6 = select i1 %t3, i64 %a1, i64 %a2
+  %t7 = sub i64 %t6, %t5
+  %t8 = lshr i64 %t7, 1
+  %t9 = mul nsw i64 %t8, %t4 ; signed
+  %a10 = add nsw i64 %t9, %a1 ; signed
+  ret i64 %a10
+}
+
+; ---------------------------------------------------------------------------- ;
+; 16-bit width
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind {
+; CHECK-LABEL: scalar_i16_signed_reg_reg:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxth w8, w0
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    cmp w8, w1, sxth
+; CHECK-NEXT:    cneg w8, w9, le
+; CHECK-NEXT:    csel w9, w1, w0, gt
+; CHECK-NEXT:    csel w10, w0, w1, gt
+; CHECK-NEXT:    sub w9, w10, w9
+; CHECK-NEXT:    ubfx w9, w9, #1, #15
+; CHECK-NEXT:    madd w0, w9, w8, w0
+; CHECK-NEXT:    ret
+  %t3 = icmp sgt i16 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i16 -1, i16 1
+  %t5 = select i1 %t3, i16 %a2, i16 %a1
+  %t6 = select i1 %t3, i16 %a1, i16 %a2
+  %t7 = sub i16 %t6, %t5
+  %t8 = lshr i16 %t7, 1
+  %t9 = mul nsw i16 %t8, %t4 ; signed
+  %a10 = add nsw i16 %t9, %a1 ; signed
+  ret i16 %a10
+}
+
+define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind {
+; CHECK-LABEL: scalar_i16_unsigned_reg_reg:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w0, #0xffff
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    cmp w8, w1, uxth
+; CHECK-NEXT:    cneg w8, w9, ls
+; CHECK-NEXT:    csel w9, w1, w0, hi
+; CHECK-NEXT:    csel w10, w0, w1, hi
+; CHECK-NEXT:    sub w9, w10, w9
+; CHECK-NEXT:    ubfx w9, w9, #1, #15
+; CHECK-NEXT:    madd w0, w9, w8, w0
+; CHECK-NEXT:    ret
+  %t3 = icmp ugt i16 %a1, %a2
+  %t4 = select i1 %t3, i16 -1, i16 1
+  %t5 = select i1 %t3, i16 %a2, i16 %a1
+  %t6 = select i1 %t3, i16 %a1, i16 %a2
+  %t7 = sub i16 %t6, %t5
+  %t8 = lshr i16 %t7, 1
+  %t9 = mul i16 %t8, %t4
+  %a10 = add i16 %t9, %a1
+  ret i16 %a10
+}
+
+; Values are loaded. Only check signed case.
+
+define i16 @scalar_i16_signed_mem_reg(i16* %a1_addr, i16 %a2) nounwind {
+; CHECK-LABEL: scalar_i16_signed_mem_reg:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldrsh w8, [x0]
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    cmp w8, w1, sxth
+; CHECK-NEXT:    csel w10, w1, w8, gt
+; CHECK-NEXT:    csel w11, w8, w1, gt
+; CHECK-NEXT:    sub w10, w11, w10
+; CHECK-NEXT:    cneg w9, w9, le
+; CHECK-NEXT:    ubfx w10, w10, #1, #15
+; CHECK-NEXT:    madd w0, w10, w9, w8
+; CHECK-NEXT:    ret
+  %a1 = load i16, i16* %a1_addr
+  %t3 = icmp sgt i16 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i16 -1, i16 1
+  %t5 = select i1 %t3, i16 %a2, i16 %a1
+  %t6 = select i1 %t3, i16 %a1, i16 %a2
+  %t7 = sub i16 %t6, %t5
+  %t8 = lshr i16 %t7, 1
+  %t9 = mul nsw i16 %t8, %t4 ; signed
+  %a10 = add nsw i16 %t9, %a1 ; signed
+  ret i16 %a10
+}
+
+define i16 @scalar_i16_signed_reg_mem(i16 %a1, i16* %a2_addr) nounwind {
+; CHECK-LABEL: scalar_i16_signed_reg_mem:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldrsh w8, [x1]
+; CHECK-NEXT:    sxth w9, w0
+; CHECK-NEXT:    mov w10, #-1
+; CHECK-NEXT:    cmp w9, w8
+; CHECK-NEXT:    cneg w9, w10, le
+; CHECK-NEXT:    csel w10, w8, w0, gt
+; CHECK-NEXT:    csel w8, w0, w8, gt
+; CHECK-NEXT:    sub w8, w8, w10
+; CHECK-NEXT:    ubfx w8, w8, #1, #15
+; CHECK-NEXT:    madd w0, w8, w9, w0
+; CHECK-NEXT:    ret
+  %a2 = load i16, i16* %a2_addr
+  %t3 = icmp sgt i16 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i16 -1, i16 1
+  %t5 = select i1 %t3, i16 %a2, i16 %a1
+  %t6 = select i1 %t3, i16 %a1, i16 %a2
+  %t7 = sub i16 %t6, %t5
+  %t8 = lshr i16 %t7, 1
+  %t9 = mul nsw i16 %t8, %t4 ; signed
+  %a10 = add nsw i16 %t9, %a1 ; signed
+  ret i16 %a10
+}
+
+define i16 @scalar_i16_signed_mem_mem(i16* %a1_addr, i16* %a2_addr) nounwind {
+; CHECK-LABEL: scalar_i16_signed_mem_mem:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldrsh w8, [x0]
+; CHECK-NEXT:    ldrsh w9, [x1]
+; CHECK-NEXT:    mov w10, #-1
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    csel w11, w9, w8, gt
+; CHECK-NEXT:    csel w9, w8, w9, gt
+; CHECK-NEXT:    sub w9, w9, w11
+; CHECK-NEXT:    cneg w10, w10, le
+; CHECK-NEXT:    ubfx w9, w9, #1, #15
+; CHECK-NEXT:    madd w0, w9, w10, w8
+; CHECK-NEXT:    ret
+  %a1 = load i16, i16* %a1_addr
+  %a2 = load i16, i16* %a2_addr
+  %t3 = icmp sgt i16 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i16 -1, i16 1
+  %t5 = select i1 %t3, i16 %a2, i16 %a1
+  %t6 = select i1 %t3, i16 %a1, i16 %a2
+  %t7 = sub i16 %t6, %t5
+  %t8 = lshr i16 %t7, 1
+  %t9 = mul nsw i16 %t8, %t4 ; signed
+  %a10 = add nsw i16 %t9, %a1 ; signed
+  ret i16 %a10
+}
+
+; ---------------------------------------------------------------------------- ;
+; 8-bit width
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define i8 @scalar_i8_signed_reg_reg(i8 %a1, i8 %a2) nounwind {
+; CHECK-LABEL: scalar_i8_signed_reg_reg:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sxtb w8, w0
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    cmp w8, w1, sxtb
+; CHECK-NEXT:    cneg w8, w9, le
+; CHECK-NEXT:    csel w9, w1, w0, gt
+; CHECK-NEXT:    csel w10, w0, w1, gt
+; CHECK-NEXT:    sub w9, w10, w9
+; CHECK-NEXT:    ubfx w9, w9, #1, #7
+; CHECK-NEXT:    madd w0, w9, w8, w0
+; CHECK-NEXT:    ret
+  %t3 = icmp sgt i8 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i8 -1, i8 1
+  %t5 = select i1 %t3, i8 %a2, i8 %a1
+  %t6 = select i1 %t3, i8 %a1, i8 %a2
+  %t7 = sub i8 %t6, %t5
+  %t8 = lshr i8 %t7, 1
+  %t9 = mul nsw i8 %t8, %t4 ; signed
+  %a10 = add nsw i8 %t9, %a1 ; signed
+  ret i8 %a10
+}
+
+define i8 @scalar_i8_unsigned_reg_reg(i8 %a1, i8 %a2) nounwind {
+; CHECK-LABEL: scalar_i8_unsigned_reg_reg:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and w8, w0, #0xff
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    cmp w8, w1, uxtb
+; CHECK-NEXT:    cneg w8, w9, ls
+; CHECK-NEXT:    csel w9, w1, w0, hi
+; CHECK-NEXT:    csel w10, w0, w1, hi
+; CHECK-NEXT:    sub w9, w10, w9
+; CHECK-NEXT:    ubfx w9, w9, #1, #7
+; CHECK-NEXT:    madd w0, w9, w8, w0
+; CHECK-NEXT:    ret
+  %t3 = icmp ugt i8 %a1, %a2
+  %t4 = select i1 %t3, i8 -1, i8 1
+  %t5 = select i1 %t3, i8 %a2, i8 %a1
+  %t6 = select i1 %t3, i8 %a1, i8 %a2
+  %t7 = sub i8 %t6, %t5
+  %t8 = lshr i8 %t7, 1
+  %t9 = mul i8 %t8, %t4
+  %a10 = add i8 %t9, %a1
+  ret i8 %a10
+}
+
+; Values are loaded. Only check signed case.
+
+define i8 @scalar_i8_signed_mem_reg(i8* %a1_addr, i8 %a2) nounwind {
+; CHECK-LABEL: scalar_i8_signed_mem_reg:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldrsb w8, [x0]
+; CHECK-NEXT:    mov w9, #-1
+; CHECK-NEXT:    cmp w8, w1, sxtb
+; CHECK-NEXT:    csel w10, w1, w8, gt
+; CHECK-NEXT:    csel w11, w8, w1, gt
+; CHECK-NEXT:    sub w10, w11, w10
+; CHECK-NEXT:    cneg w9, w9, le
+; CHECK-NEXT:    ubfx w10, w10, #1, #7
+; CHECK-NEXT:    madd w0, w10, w9, w8
+; CHECK-NEXT:    ret
+  %a1 = load i8, i8* %a1_addr
+  %t3 = icmp sgt i8 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i8 -1, i8 1
+  %t5 = select i1 %t3, i8 %a2, i8 %a1
+  %t6 = select i1 %t3, i8 %a1, i8 %a2
+  %t7 = sub i8 %t6, %t5
+  %t8 = lshr i8 %t7, 1
+  %t9 = mul nsw i8 %t8, %t4 ; signed
+  %a10 = add nsw i8 %t9, %a1 ; signed
+  ret i8 %a10
+}
+
+define i8 @scalar_i8_signed_reg_mem(i8 %a1, i8* %a2_addr) nounwind {
+; CHECK-LABEL: scalar_i8_signed_reg_mem:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldrsb w8, [x1]
+; CHECK-NEXT:    sxtb w9, w0
+; CHECK-NEXT:    mov w10, #-1
+; CHECK-NEXT:    cmp w9, w8
+; CHECK-NEXT:    cneg w9, w10, le
+; CHECK-NEXT:    csel w10, w8, w0, gt
+; CHECK-NEXT:    csel w8, w0, w8, gt
+; CHECK-NEXT:    sub w8, w8, w10
+; CHECK-NEXT:    ubfx w8, w8, #1, #7
+; CHECK-NEXT:    madd w0, w8, w9, w0
+; CHECK-NEXT:    ret
+  %a2 = load i8, i8* %a2_addr
+  %t3 = icmp sgt i8 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i8 -1, i8 1
+  %t5 = select i1 %t3, i8 %a2, i8 %a1
+  %t6 = select i1 %t3, i8 %a1, i8 %a2
+  %t7 = sub i8 %t6, %t5
+  %t8 = lshr i8 %t7, 1
+  %t9 = mul nsw i8 %t8, %t4 ; signed
+  %a10 = add nsw i8 %t9, %a1 ; signed
+  ret i8 %a10
+}
+
+define i8 @scalar_i8_signed_mem_mem(i8* %a1_addr, i8* %a2_addr) nounwind {
+; CHECK-LABEL: scalar_i8_signed_mem_mem:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldrsb w8, [x0]
+; CHECK-NEXT:    ldrsb w9, [x1]
+; CHECK-NEXT:    mov w10, #-1
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    csel w11, w9, w8, gt
+; CHECK-NEXT:    csel w9, w8, w9, gt
+; CHECK-NEXT:    sub w9, w9, w11
+; CHECK-NEXT:    cneg w10, w10, le
+; CHECK-NEXT:    ubfx w9, w9, #1, #7
+; CHECK-NEXT:    madd w0, w9, w10, w8
+; CHECK-NEXT:    ret
+  %a1 = load i8, i8* %a1_addr
+  %a2 = load i8, i8* %a2_addr
+  %t3 = icmp sgt i8 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i8 -1, i8 1
+  %t5 = select i1 %t3, i8 %a2, i8 %a1
+  %t6 = select i1 %t3, i8 %a1, i8 %a2
+  %t7 = sub i8 %t6, %t5
+  %t8 = lshr i8 %t7, 1
+  %t9 = mul nsw i8 %t8, %t4 ; signed
+  %a10 = add nsw i8 %t9, %a1 ; signed
+  ret i8 %a10
+}

Added: llvm/trunk/test/CodeGen/X86/midpoint-int-vec-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/midpoint-int-vec-128.ll?rev=355436&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/midpoint-int-vec-128.ll (added)
+++ llvm/trunk/test/CodeGen/X86/midpoint-int-vec-128.ll Tue Mar  5 12:18:47 2019
@@ -0,0 +1,3966 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=ALL,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX,AVX1,AVX1-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX2,AVX2-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop | FileCheck %s --check-prefixes=ALL,XOP,XOP-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=ALL,XOP,AVX,AVX1,XOPAVX,XOPAVX1,XOPAVX1-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=ALL,XOP,AVX,AVX2,XOPAVX,XOPAVX2,XOPAVX2-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=ALL,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VL,AVX512VL-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512,AVX512BW,AVX512BW-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VL,AVX512BW,AVX512VLBW
+
+; These test cases are inspired by C++2a std::midpoint().
+; See https://bugs.llvm.org/show_bug.cgi?id=40965
+
+; Using 128-bit vector regs.
+
+; ---------------------------------------------------------------------------- ;
+; 32-bit width. 128 / 32 = 4 elts.
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define <4 x i32> @vec128_i32_signed_reg_reg(<4 x i32> %a1, <4 x i32> %a2) nounwind {
+; SSE2-LABEL: vec128_i32_signed_reg_reg:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [1,1,1,1]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pand %xmm4, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm4
+; SSE2-NEXT:    por %xmm5, %xmm4
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pand %xmm2, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm2
+; SSE2-NEXT:    por %xmm5, %xmm2
+; SSE2-NEXT:    psubd %xmm4, %xmm2
+; SSE2-NEXT:    psrld $1, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    paddd %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i32_signed_reg_reg:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSE41-NEXT:    por {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pminsd %xmm1, %xmm3
+; SSE41-NEXT:    pmaxsd %xmm0, %xmm1
+; SSE41-NEXT:    psubd %xmm3, %xmm1
+; SSE41-NEXT:    psrld $1, %xmm1
+; SSE41-NEXT:    pmulld %xmm1, %xmm2
+; SSE41-NEXT:    paddd %xmm0, %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX2-FALLBACK-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vpcomgtd %xmm1, %xmm0, %xmm2
+; XOP-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec128_i32_signed_reg_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpcomgtd %xmm1, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: vec128_i32_signed_reg_reg:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpcomgtd %xmm1, %xmm0, %xmm2
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; XOPAVX2-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i32_signed_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vpcmpgtd %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512F-NEXT:    vmovdqa32 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminsd %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX512F-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec128_i32_signed_reg_reg:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpcmpgtd %xmm1, %xmm0, %k1
+; AVX512VL-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512VL-NEXT:    vmovdqa32 %xmm2, %xmm3 {%k1}
+; AVX512VL-NEXT:    vpminsd %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX512VL-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtd %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa32 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+  %t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
+  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
+  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
+  %t7 = sub <4 x i32> %t6, %t5
+  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul nsw <4 x i32> %t8, %t4 ; signed
+  %a10 = add nsw <4 x i32> %t9, %a1 ; signed
+  ret <4 x i32> %a10
+}
+
+define <4 x i32> @vec128_i32_unsigned_reg_reg(<4 x i32> %a1, <4 x i32> %a2) nounwind {
+; SSE2-LABEL: vec128_i32_unsigned_reg_reg:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    pxor %xmm2, %xmm3
+; SSE2-NEXT:    pxor %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm3, %xmm4
+; SSE2-NEXT:    movdqa {{.*#+}} xmm5 = [1,1,1,1]
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    pcmpgtd %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pand %xmm3, %xmm2
+; SSE2-NEXT:    pandn %xmm1, %xmm3
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pand %xmm4, %xmm2
+; SSE2-NEXT:    pandn %xmm1, %xmm4
+; SSE2-NEXT:    por %xmm2, %xmm4
+; SSE2-NEXT:    psubd %xmm3, %xmm4
+; SSE2-NEXT:    psrld $1, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm5, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    paddd %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i32_unsigned_reg_reg:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    pminud %xmm1, %xmm2
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pcmpeqd %xmm2, %xmm3
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE41-NEXT:    pxor %xmm3, %xmm4
+; SSE41-NEXT:    por {{.*}}(%rip), %xmm4
+; SSE41-NEXT:    pmaxud %xmm0, %xmm1
+; SSE41-NEXT:    psubd %xmm2, %xmm1
+; SSE41-NEXT:    psrld $1, %xmm1
+; SSE41-NEXT:    pmulld %xmm1, %xmm4
+; SSE41-NEXT:    paddd %xmm4, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vpminud %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vpminud %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm3
+; AVX2-FALLBACK-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX2-FALLBACK-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; AVX2-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [1,1,1,1]
+; AVX2-FALLBACK-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vpcomgtud %xmm1, %xmm0, %xmm2
+; XOP-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpminud %xmm1, %xmm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec128_i32_unsigned_reg_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpcomgtud %xmm1, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: vec128_i32_unsigned_reg_reg:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpcomgtud %xmm1, %xmm0, %xmm2
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; XOPAVX2-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i32_unsigned_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vpcmpnleud %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512F-NEXT:    vmovdqa32 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminud %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; AVX512F-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec128_i32_unsigned_reg_reg:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpcmpnleud %xmm1, %xmm0, %k1
+; AVX512VL-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512VL-NEXT:    vmovdqa32 %xmm2, %xmm3 {%k1}
+; AVX512VL-NEXT:    vpminud %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; AVX512VL-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vpcmpnleud %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa32 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminud %xmm1, %xmm0, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+  %t3 = icmp ugt <4 x i32> %a1, %a2
+  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
+  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
+  %t7 = sub <4 x i32> %t6, %t5
+  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul <4 x i32> %t8, %t4
+  %a10 = add <4 x i32> %t9, %a1
+  ret <4 x i32> %a10
+}
+
+; Values are loaded. Only check signed case.
+
+define <4 x i32> @vec128_i32_signed_mem_reg(<4 x i32>* %a1_addr, <4 x i32> %a2) nounwind {
+; SSE2-LABEL: vec128_i32_signed_mem_reg:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa (%rdi), %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [1,1,1,1]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    pand %xmm4, %xmm5
+; SSE2-NEXT:    pandn %xmm0, %xmm4
+; SSE2-NEXT:    por %xmm5, %xmm4
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    pand %xmm2, %xmm5
+; SSE2-NEXT:    pandn %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm5, %xmm2
+; SSE2-NEXT:    psubd %xmm4, %xmm2
+; SSE2-NEXT:    psrld $1, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm4, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    paddd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i32_signed_mem_reg:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa (%rdi), %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT:    por {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    pminsd %xmm0, %xmm3
+; SSE41-NEXT:    pmaxsd %xmm1, %xmm0
+; SSE41-NEXT:    psubd %xmm3, %xmm0
+; SSE41-NEXT:    psrld $1, %xmm0
+; SSE41-NEXT:    pmulld %xmm2, %xmm0
+; SSE41-NEXT:    paddd %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-FALLBACK-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm2
+; AVX2-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX2-FALLBACK-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vpsrld $1, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-FALLBACK-NEXT:    vpcomgtd %xmm0, %xmm1, %xmm2
+; XOP-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
+; XOP-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm1, %xmm2, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec128_i32_signed_mem_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX1-NEXT:    vpcomgtd %xmm0, %xmm1, %xmm2
+; XOPAVX1-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
+; XOPAVX1-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpmacsdd %xmm1, %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: vec128_i32_signed_mem_reg:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX2-NEXT:    vpcomgtd %xmm0, %xmm1, %xmm2
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; XOPAVX2-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
+; XOPAVX2-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
+; XOPAVX2-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpsrld $1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpmacsdd %xmm1, %xmm2, %xmm0, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i32_signed_mem_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512F-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
+; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512F-NEXT:    vmovdqa32 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminsd %xmm0, %xmm1, %xmm2
+; AVX512F-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT:    vpsrld $1, %xmm0, %xmm0
+; AVX512F-NEXT:    vpmulld %xmm3, %xmm0, %xmm0
+; AVX512F-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec128_i32_signed_mem_reg:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512VL-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1
+; AVX512VL-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512VL-NEXT:    vmovdqa32 %xmm2, %xmm3 {%k1}
+; AVX512VL-NEXT:    vpminsd %xmm0, %xmm1, %xmm2
+; AVX512VL-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpsrld $1, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpmulld %xmm3, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa32 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsd %xmm0, %xmm1, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpsubd %xmm2, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpsrld $1, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpmulld %xmm3, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+  %a1 = load <4 x i32>, <4 x i32>* %a1_addr
+  %t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
+  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
+  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
+  %t7 = sub <4 x i32> %t6, %t5
+  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul nsw <4 x i32> %t8, %t4 ; signed
+  %a10 = add nsw <4 x i32> %t9, %a1 ; signed
+  ret <4 x i32> %a10
+}
+
+define <4 x i32> @vec128_i32_signed_reg_mem(<4 x i32> %a1, <4 x i32>* %a2_addr) nounwind {
+; SSE2-LABEL: vec128_i32_signed_reg_mem:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa (%rdi), %xmm1
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [1,1,1,1]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pand %xmm4, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm4
+; SSE2-NEXT:    por %xmm5, %xmm4
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pand %xmm2, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm2
+; SSE2-NEXT:    por %xmm5, %xmm2
+; SSE2-NEXT:    psubd %xmm4, %xmm2
+; SSE2-NEXT:    psrld $1, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm1, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    paddd %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i32_signed_reg_mem:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa (%rdi), %xmm2
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    pcmpgtd %xmm2, %xmm1
+; SSE41-NEXT:    por {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pminsd %xmm2, %xmm3
+; SSE41-NEXT:    pmaxsd %xmm0, %xmm2
+; SSE41-NEXT:    psubd %xmm3, %xmm2
+; SSE41-NEXT:    psrld $1, %xmm2
+; SSE41-NEXT:    pmulld %xmm2, %xmm1
+; SSE41-NEXT:    paddd %xmm0, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-FALLBACK-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX2-FALLBACK-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-FALLBACK-NEXT:    vpcomgtd %xmm1, %xmm0, %xmm2
+; XOP-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec128_i32_signed_reg_mem:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX1-NEXT:    vpcomgtd %xmm1, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: vec128_i32_signed_reg_mem:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX2-NEXT:    vpcomgtd %xmm1, %xmm0, %xmm2
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; XOPAVX2-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i32_signed_reg_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512F-NEXT:    vpcmpgtd %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512F-NEXT:    vmovdqa32 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminsd %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX512F-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec128_i32_signed_reg_mem:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512VL-NEXT:    vpcmpgtd %xmm1, %xmm0, %k1
+; AVX512VL-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512VL-NEXT:    vmovdqa32 %xmm2, %xmm3 {%k1}
+; AVX512VL-NEXT:    vpminsd %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX512VL-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtd %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa32 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+  %a2 = load <4 x i32>, <4 x i32>* %a2_addr
+  %t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
+  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
+  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
+  %t7 = sub <4 x i32> %t6, %t5
+  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul nsw <4 x i32> %t8, %t4 ; signed
+  %a10 = add nsw <4 x i32> %t9, %a1 ; signed
+  ret <4 x i32> %a10
+}
+
+define <4 x i32> @vec128_i32_signed_mem_mem(<4 x i32>* %a1_addr, <4 x i32>* %a2_addr) nounwind {
+; SSE2-LABEL: vec128_i32_signed_mem_mem:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa (%rdi), %xmm1
+; SSE2-NEXT:    movdqa (%rsi), %xmm0
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [1,1,1,1]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    pcmpgtd %xmm1, %xmm4
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    pand %xmm4, %xmm5
+; SSE2-NEXT:    pandn %xmm0, %xmm4
+; SSE2-NEXT:    por %xmm5, %xmm4
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    pand %xmm2, %xmm5
+; SSE2-NEXT:    pandn %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm5, %xmm2
+; SSE2-NEXT:    psubd %xmm4, %xmm2
+; SSE2-NEXT:    psrld $1, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm4, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT:    paddd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i32_signed_mem_mem:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa (%rdi), %xmm1
+; SSE41-NEXT:    movdqa (%rsi), %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT:    por {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    pminsd %xmm0, %xmm3
+; SSE41-NEXT:    pmaxsd %xmm1, %xmm0
+; SSE41-NEXT:    psubd %xmm3, %xmm0
+; SSE41-NEXT:    psrld $1, %xmm0
+; SSE41-NEXT:    pmulld %xmm2, %xmm0
+; SSE41-NEXT:    paddd %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX2-FALLBACK-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX2-FALLBACK-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; XOP-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; XOP-FALLBACK-NEXT:    vpcomgtd %xmm1, %xmm0, %xmm2
+; XOP-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec128_i32_signed_mem_mem:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm0
+; XOPAVX1-NEXT:    vmovdqa (%rsi), %xmm1
+; XOPAVX1-NEXT:    vpcomgtd %xmm1, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: vec128_i32_signed_mem_mem:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; XOPAVX2-NEXT:    vmovdqa (%rsi), %xmm1
+; XOPAVX2-NEXT:    vpcomgtd %xmm1, %xmm0, %xmm2
+; XOPAVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; XOPAVX2-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpmacsdd %xmm0, %xmm2, %xmm1, %xmm0
+; XOPAVX2-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i32_signed_mem_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512F-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512F-NEXT:    vpcmpgtd %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512F-NEXT:    vmovdqa32 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminsd %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX512F-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec128_i32_signed_mem_mem:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512VL-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512VL-NEXT:    vpcmpgtd %xmm1, %xmm0, %k1
+; AVX512VL-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512VL-NEXT:    vmovdqa32 %xmm2, %xmm3 {%k1}
+; AVX512VL-NEXT:    vpminsd %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX512VL-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtd %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa32 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+  %a1 = load <4 x i32>, <4 x i32>* %a1_addr
+  %a2 = load <4 x i32>, <4 x i32>* %a2_addr
+  %t3 = icmp sgt <4 x i32> %a1, %a2 ; signed
+  %t4 = select <4 x i1> %t3, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <4 x i1> %t3, <4 x i32> %a2, <4 x i32> %a1
+  %t6 = select <4 x i1> %t3, <4 x i32> %a1, <4 x i32> %a2
+  %t7 = sub <4 x i32> %t6, %t5
+  %t8 = lshr <4 x i32> %t7, <i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul nsw <4 x i32> %t8, %t4 ; signed
+  %a10 = add nsw <4 x i32> %t9, %a1 ; signed
+  ret <4 x i32> %a10
+}
+
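+; All of these vector tests encode one midpoint recipe; as a rough C-like
+; sketch of the pattern (illustration only, not checked IR):
+;   sign  = (a1 > a2) ? -1 : 1
+;   delta = (max(a1, a2) - min(a1, a2)) >> 1
+;   mid   = a1 + sign * delta
+; which, unlike the naive (a1 + a2) / 2, cannot overflow the element type.
+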
+; ---------------------------------------------------------------------------- ;
+; 64-bit width. 128 / 64 = 2 elts.
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define <2 x i64> @vec128_i64_signed_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwind {
+; SSE2-LABEL: vec128_i64_signed_reg_reg:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    pxor %xmm4, %xmm5
+; SSE2-NEXT:    pxor %xmm0, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm5, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-NEXT:    movdqa %xmm4, %xmm6
+; SSE2-NEXT:    pcmpeqd %xmm5, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT:    pand %xmm6, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    por %xmm3, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [1,1]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    pcmpgtd %xmm4, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
+; SSE2-NEXT:    pand %xmm6, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    pand %xmm5, %xmm4
+; SSE2-NEXT:    pandn %xmm1, %xmm5
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    pand %xmm2, %xmm4
+; SSE2-NEXT:    pandn %xmm1, %xmm2
+; SSE2-NEXT:    por %xmm4, %xmm2
+; SSE2-NEXT:    psubq %xmm5, %xmm2
+; SSE2-NEXT:    psrlq $1, %xmm2
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    psrlq $32, %xmm4
+; SSE2-NEXT:    pmuludq %xmm2, %xmm4
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    psrlq $32, %xmm1
+; SSE2-NEXT:    pmuludq %xmm3, %xmm1
+; SSE2-NEXT:    paddq %xmm4, %xmm1
+; SSE2-NEXT:    psllq $32, %xmm1
+; SSE2-NEXT:    pmuludq %xmm3, %xmm2
+; SSE2-NEXT:    paddq %xmm0, %xmm1
+; SSE2-NEXT:    paddq %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i64_signed_reg_reg:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    pxor %xmm0, %xmm5
+; SSE41-NEXT:    pxor %xmm2, %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pcmpgtd %xmm5, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    pcmpeqd %xmm5, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; SSE41-NEXT:    por %xmm6, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [1,1]
+; SSE41-NEXT:    por %xmm4, %xmm3
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm5
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
+; SSE41-NEXT:    psubq %xmm5, %xmm1
+; SSE41-NEXT:    psrlq $1, %xmm1
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    psrlq $32, %xmm0
+; SSE41-NEXT:    pmuludq %xmm1, %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
+; SSE41-NEXT:    psrlq $32, %xmm4
+; SSE41-NEXT:    pmuludq %xmm3, %xmm4
+; SSE41-NEXT:    paddq %xmm0, %xmm4
+; SSE41-NEXT:    psllq $32, %xmm4
+; SSE41-NEXT:    pmuludq %xmm1, %xmm3
+; SSE41-NEXT:    paddq %xmm2, %xmm4
+; SSE41-NEXT:    paddq %xmm4, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i64_signed_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i64_signed_reg_reg:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm3
+; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm4
+; AVX2-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-LABEL: vec128_i64_signed_reg_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm2
+; XOP-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm3
+; XOP-NEXT:    vpcomltq %xmm1, %xmm0, %xmm4
+; XOP-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; XOP-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
+; XOP-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; XOP-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; XOP-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; XOP-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; XOP-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpsllq $32, %xmm2, %xmm2
+; XOP-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; XOP-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i64_signed_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; AVX512F-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; AVX512F-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlq $32, %xmm1, %xmm2
+; AVX512F-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX512F-NEXT:    vpmuludq %xmm4, %xmm1, %xmm4
+; AVX512F-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; AVX512F-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512F-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX512F-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec128_i64_signed_reg_reg:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpcmpgtq %xmm1, %xmm0, %k1
+; AVX512VL-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512VL-NEXT:    vmovdqa64 %xmm2, %xmm3 {%k1}
+; AVX512VL-NEXT:    vpminsq %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT:    vpmaxsq %xmm1, %xmm0, %xmm1
+; AVX512VL-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; AVX512VL-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; AVX512VL-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; AVX512VL-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX512VL-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_reg_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm4
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+  %t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
+  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
+  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
+  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
+  %t7 = sub <2 x i64> %t6, %t5
+  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
+  %t9 = mul nsw <2 x i64> %t8, %t4 ; signed
+  %a10 = add nsw <2 x i64> %t9, %a1 ; signed
+  ret <2 x i64> %a10
+}
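+
+; The i64 asm above is long for two reasons. First, there is no packed
+; 64-bit multiply before AVX512DQ, so it is emulated from 32x32->64-bit
+; pmuludq pieces using, per 64-bit lane:
+;   x * y == lo(x)*lo(y) + ((lo(x)*hi(y) + hi(x)*lo(y)) << 32)
+; Second, pcmpgtq only exists from SSE4.2, so the SSE2/SSE4.1 versions
+; synthesize the 64-bit compare from pcmpgtd/pcmpeqd plus shuffles.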
+
+define <2 x i64> @vec128_i64_unsigned_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwind {
+; SSE2-LABEL: vec128_i64_unsigned_reg_reg:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [9223372039002259456,9223372039002259456]
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    pxor %xmm4, %xmm5
+; SSE2-NEXT:    pxor %xmm0, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm5, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-NEXT:    movdqa %xmm4, %xmm6
+; SSE2-NEXT:    pcmpeqd %xmm5, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT:    pand %xmm6, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    por %xmm3, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [1,1]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    pcmpgtd %xmm4, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
+; SSE2-NEXT:    pand %xmm6, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    pand %xmm5, %xmm4
+; SSE2-NEXT:    pandn %xmm1, %xmm5
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    pand %xmm2, %xmm4
+; SSE2-NEXT:    pandn %xmm1, %xmm2
+; SSE2-NEXT:    por %xmm4, %xmm2
+; SSE2-NEXT:    psubq %xmm5, %xmm2
+; SSE2-NEXT:    psrlq $1, %xmm2
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    psrlq $32, %xmm4
+; SSE2-NEXT:    pmuludq %xmm2, %xmm4
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    psrlq $32, %xmm1
+; SSE2-NEXT:    pmuludq %xmm3, %xmm1
+; SSE2-NEXT:    paddq %xmm4, %xmm1
+; SSE2-NEXT:    psllq $32, %xmm1
+; SSE2-NEXT:    pmuludq %xmm3, %xmm2
+; SSE2-NEXT:    paddq %xmm0, %xmm1
+; SSE2-NEXT:    paddq %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i64_unsigned_reg_reg:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [9223372039002259456,9223372039002259456]
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    pxor %xmm0, %xmm5
+; SSE41-NEXT:    pxor %xmm2, %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pcmpgtd %xmm5, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm0, %xmm4
+; SSE41-NEXT:    pcmpeqd %xmm5, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm4[1,1,3,3]
+; SSE41-NEXT:    pand %xmm7, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; SSE41-NEXT:    por %xmm6, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm3 = [1,1]
+; SSE41-NEXT:    por %xmm4, %xmm3
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
+; SSE41-NEXT:    pand %xmm7, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm5
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm2, %xmm1
+; SSE41-NEXT:    psubq %xmm5, %xmm1
+; SSE41-NEXT:    psrlq $1, %xmm1
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    psrlq $32, %xmm0
+; SSE41-NEXT:    pmuludq %xmm1, %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
+; SSE41-NEXT:    psrlq $32, %xmm4
+; SSE41-NEXT:    pmuludq %xmm3, %xmm4
+; SSE41-NEXT:    paddq %xmm0, %xmm4
+; SSE41-NEXT:    psllq $32, %xmm4
+; SSE41-NEXT:    pmuludq %xmm1, %xmm3
+; SSE41-NEXT:    paddq %xmm2, %xmm4
+; SSE41-NEXT:    paddq %xmm4, %xmm3
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i64_unsigned_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-FALLBACK-NEXT:    vpxor %xmm2, %xmm1, %xmm3
+; AVX1-FALLBACK-NEXT:    vpxor %xmm2, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm4
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm4, %xmm5
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i64_unsigned_reg_reg:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX2-FALLBACK-NEXT:    vpxor %xmm2, %xmm1, %xmm3
+; AVX2-FALLBACK-NEXT:    vpxor %xmm2, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm4
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm4, %xmm5
+; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm2
+; AVX2-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm2
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-LABEL: vec128_i64_unsigned_reg_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpcomgtuq %xmm1, %xmm0, %xmm2
+; XOP-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm3
+; XOP-NEXT:    vpcomltuq %xmm1, %xmm0, %xmm4
+; XOP-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; XOP-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
+; XOP-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; XOP-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; XOP-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; XOP-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; XOP-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpsllq $32, %xmm2, %xmm2
+; XOP-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; XOP-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i64_unsigned_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vpcmpnleuq %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminuq %zmm1, %zmm0, %zmm2
+; AVX512F-NEXT:    vpmaxuq %zmm1, %zmm0, %zmm1
+; AVX512F-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlq $32, %xmm1, %xmm2
+; AVX512F-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX512F-NEXT:    vpmuludq %xmm4, %xmm1, %xmm4
+; AVX512F-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; AVX512F-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512F-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX512F-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec128_i64_unsigned_reg_reg:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpcmpnleuq %xmm1, %xmm0, %k1
+; AVX512VL-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512VL-NEXT:    vmovdqa64 %xmm2, %xmm3 {%k1}
+; AVX512VL-NEXT:    vpminuq %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT:    vpmaxuq %xmm1, %xmm0, %xmm1
+; AVX512VL-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; AVX512VL-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; AVX512VL-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; AVX512VL-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX512VL-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i64_unsigned_reg_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vpcmpnleuq %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminuq %zmm1, %zmm0, %zmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxuq %zmm1, %zmm0, %zmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm4
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+  %t3 = icmp ugt <2 x i64> %a1, %a2
+  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
+  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
+  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
+  %t7 = sub <2 x i64> %t6, %t5
+  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
+  %t9 = mul <2 x i64> %t8, %t4
+  %a10 = add <2 x i64> %t9, %a1
+  ret <2 x i64> %a10
+}
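+
+; Unsigned i64 compares are also synthesized pre-AVX512: both inputs are
+; XORed with a sign-bit constant (0x8000000000000000 per lane, or
+; 0x8000000080000000 for the SSE2 dword-wise compare), mapping unsigned
+; order onto signed order so the signed compare sequence can be reused.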
+
+; Values are loaded. Only check signed case.
+
+define <2 x i64> @vec128_i64_signed_mem_reg(<2 x i64>* %a1_addr, <2 x i64> %a2) nounwind {
+; SSE2-LABEL: vec128_i64_signed_mem_reg:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa (%rdi), %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pxor %xmm4, %xmm5
+; SSE2-NEXT:    pxor %xmm1, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm5, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-NEXT:    movdqa %xmm4, %xmm6
+; SSE2-NEXT:    pcmpeqd %xmm5, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT:    pand %xmm6, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    por %xmm3, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [1,1]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    pcmpgtd %xmm4, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
+; SSE2-NEXT:    pand %xmm6, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    pand %xmm5, %xmm4
+; SSE2-NEXT:    pandn %xmm0, %xmm5
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    pand %xmm2, %xmm4
+; SSE2-NEXT:    pandn %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm4, %xmm2
+; SSE2-NEXT:    psubq %xmm5, %xmm2
+; SSE2-NEXT:    psrlq $1, %xmm2
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    psrlq $32, %xmm4
+; SSE2-NEXT:    pmuludq %xmm2, %xmm4
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    psrlq $32, %xmm0
+; SSE2-NEXT:    pmuludq %xmm3, %xmm0
+; SSE2-NEXT:    paddq %xmm4, %xmm0
+; SSE2-NEXT:    psllq $32, %xmm0
+; SSE2-NEXT:    pmuludq %xmm3, %xmm2
+; SSE2-NEXT:    paddq %xmm1, %xmm0
+; SSE2-NEXT:    paddq %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i64_signed_mem_reg:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    movdqa (%rdi), %xmm3
+; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
+; SSE41-NEXT:    pxor %xmm5, %xmm0
+; SSE41-NEXT:    pxor %xmm3, %xmm5
+; SSE41-NEXT:    movdqa %xmm5, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm5, %xmm6
+; SSE41-NEXT:    pcmpeqd %xmm0, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT:    por %xmm4, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [1,1]
+; SSE41-NEXT:    por %xmm2, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm5, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm0[0,0,2,2]
+; SSE41-NEXT:    pand %xmm6, %xmm5
+; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm5
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm1
+; SSE41-NEXT:    psubq %xmm5, %xmm1
+; SSE41-NEXT:    psrlq $1, %xmm1
+; SSE41-NEXT:    movdqa %xmm4, %xmm2
+; SSE41-NEXT:    psrlq $32, %xmm2
+; SSE41-NEXT:    pmuludq %xmm1, %xmm2
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    psrlq $32, %xmm0
+; SSE41-NEXT:    pmuludq %xmm4, %xmm0
+; SSE41-NEXT:    paddq %xmm2, %xmm0
+; SSE41-NEXT:    psllq $32, %xmm0
+; SSE41-NEXT:    pmuludq %xmm4, %xmm1
+; SSE41-NEXT:    paddq %xmm3, %xmm0
+; SSE41-NEXT:    paddq %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i64_signed_mem_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm1, %xmm0, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm2, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm0, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i64_signed_mem_reg:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm3
+; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm4
+; AVX2-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm1, %xmm0, %xmm4
+; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vpsubq %xmm4, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm2, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm0, %xmm4
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-LABEL: vec128_i64_signed_mem_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-NEXT:    vpcomgtq %xmm0, %xmm1, %xmm2
+; XOP-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm3
+; XOP-NEXT:    vpcomltq %xmm0, %xmm1, %xmm4
+; XOP-NEXT:    vblendvpd %xmm4, %xmm1, %xmm0, %xmm4
+; XOP-NEXT:    vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    vpsubq %xmm4, %xmm0, %xmm0
+; XOP-NEXT:    vpsrlq $1, %xmm0, %xmm0
+; XOP-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; XOP-NEXT:    vpmuludq %xmm2, %xmm0, %xmm2
+; XOP-NEXT:    vpsrlq $32, %xmm0, %xmm4
+; XOP-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; XOP-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpsllq $32, %xmm2, %xmm2
+; XOP-NEXT:    vpmuludq %xmm3, %xmm0, %xmm0
+; XOP-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
+; XOP-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i64_signed_mem_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512F-NEXT:    vpcmpgtq %zmm0, %zmm1, %k1
+; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminsq %zmm0, %zmm1, %zmm2
+; AVX512F-NEXT:    vpmaxsq %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT:    vpsrlq $1, %xmm0, %xmm0
+; AVX512F-NEXT:    vpsrlq $32, %xmm0, %xmm2
+; AVX512F-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX512F-NEXT:    vpmuludq %xmm4, %xmm0, %xmm4
+; AVX512F-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; AVX512F-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512F-NEXT:    vpmuludq %xmm3, %xmm0, %xmm0
+; AVX512F-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
+; AVX512F-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec128_i64_signed_mem_reg:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512VL-NEXT:    vpcmpgtq %xmm0, %xmm1, %k1
+; AVX512VL-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512VL-NEXT:    vmovdqa64 %xmm2, %xmm3 {%k1}
+; AVX512VL-NEXT:    vpminsq %xmm0, %xmm1, %xmm2
+; AVX512VL-NEXT:    vpmaxsq %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpsrlq $1, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; AVX512VL-NEXT:    vpmuludq %xmm2, %xmm0, %xmm2
+; AVX512VL-NEXT:    vpsrlq $32, %xmm0, %xmm4
+; AVX512VL-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmuludq %xmm3, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
+; AVX512VL-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_mem_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtq %zmm0, %zmm1, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsq %zmm0, %zmm1, %zmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsq %zmm0, %zmm1, %zmm0
+; AVX512BW-FALLBACK-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %xmm0, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm0, %xmm4
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+  %a1 = load <2 x i64>, <2 x i64>* %a1_addr
+  %t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
+  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
+  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
+  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
+  %t7 = sub <2 x i64> %t6, %t5
+  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
+  %t9 = mul nsw <2 x i64> %t8, %t4 ; signed
+  %a10 = add nsw <2 x i64> %t9, %a1 ; signed
+  ret <2 x i64> %a10
+}
+
+define <2 x i64> @vec128_i64_signed_reg_mem(<2 x i64> %a1, <2 x i64>* %a2_addr) nounwind {
+; SSE2-LABEL: vec128_i64_signed_reg_mem:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa (%rdi), %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pxor %xmm4, %xmm5
+; SSE2-NEXT:    pxor %xmm1, %xmm4
+; SSE2-NEXT:    movdqa %xmm5, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm4, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-NEXT:    movdqa %xmm5, %xmm6
+; SSE2-NEXT:    pcmpeqd %xmm4, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT:    pand %xmm6, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    por %xmm3, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [1,1]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    pcmpgtd %xmm5, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
+; SSE2-NEXT:    pand %xmm6, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm4[1,1,3,3]
+; SSE2-NEXT:    por %xmm5, %xmm4
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pand %xmm4, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm4
+; SSE2-NEXT:    por %xmm5, %xmm4
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pand %xmm2, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm2
+; SSE2-NEXT:    por %xmm5, %xmm2
+; SSE2-NEXT:    psubq %xmm4, %xmm2
+; SSE2-NEXT:    psrlq $1, %xmm2
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    psrlq $32, %xmm4
+; SSE2-NEXT:    pmuludq %xmm2, %xmm4
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    psrlq $32, %xmm1
+; SSE2-NEXT:    pmuludq %xmm3, %xmm1
+; SSE2-NEXT:    paddq %xmm4, %xmm1
+; SSE2-NEXT:    psllq $32, %xmm1
+; SSE2-NEXT:    pmuludq %xmm3, %xmm2
+; SSE2-NEXT:    paddq %xmm0, %xmm1
+; SSE2-NEXT:    paddq %xmm2, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i64_signed_reg_mem:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    movdqa (%rdi), %xmm3
+; SSE41-NEXT:    movdqa {{.*#+}} xmm5 = [2147483648,2147483648]
+; SSE41-NEXT:    pxor %xmm5, %xmm0
+; SSE41-NEXT:    pxor %xmm3, %xmm5
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    pcmpgtd %xmm5, %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm0, %xmm6
+; SSE41-NEXT:    pcmpeqd %xmm5, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE41-NEXT:    por %xmm4, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [1,1]
+; SSE41-NEXT:    por %xmm2, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    movdqa %xmm3, %xmm5
+; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm5
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm1, %xmm3
+; SSE41-NEXT:    psubq %xmm5, %xmm3
+; SSE41-NEXT:    psrlq $1, %xmm3
+; SSE41-NEXT:    movdqa %xmm4, %xmm2
+; SSE41-NEXT:    psrlq $32, %xmm2
+; SSE41-NEXT:    pmuludq %xmm3, %xmm2
+; SSE41-NEXT:    movdqa %xmm3, %xmm0
+; SSE41-NEXT:    psrlq $32, %xmm0
+; SSE41-NEXT:    pmuludq %xmm4, %xmm0
+; SSE41-NEXT:    paddq %xmm2, %xmm0
+; SSE41-NEXT:    psllq $32, %xmm0
+; SSE41-NEXT:    pmuludq %xmm4, %xmm3
+; SSE41-NEXT:    paddq %xmm1, %xmm0
+; SSE41-NEXT:    paddq %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i64_signed_reg_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i64_signed_reg_mem:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm3
+; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm4
+; AVX2-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-LABEL: vec128_i64_signed_reg_mem:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm2
+; XOP-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm3
+; XOP-NEXT:    vpcomltq %xmm1, %xmm0, %xmm4
+; XOP-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; XOP-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
+; XOP-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; XOP-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; XOP-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; XOP-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; XOP-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpsllq $32, %xmm2, %xmm2
+; XOP-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; XOP-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i64_signed_reg_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512F-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; AVX512F-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; AVX512F-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlq $32, %xmm1, %xmm2
+; AVX512F-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX512F-NEXT:    vpmuludq %xmm4, %xmm1, %xmm4
+; AVX512F-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; AVX512F-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512F-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX512F-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec128_i64_signed_reg_mem:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512VL-NEXT:    vpcmpgtq %xmm1, %xmm0, %k1
+; AVX512VL-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512VL-NEXT:    vmovdqa64 %xmm2, %xmm3 {%k1}
+; AVX512VL-NEXT:    vpminsq %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT:    vpmaxsq %xmm1, %xmm0, %xmm1
+; AVX512VL-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; AVX512VL-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; AVX512VL-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; AVX512VL-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX512VL-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_reg_mem:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm4
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+  %a2 = load <2 x i64>, <2 x i64>* %a2_addr
+  %t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
+  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
+  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
+  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
+  %t7 = sub <2 x i64> %t6, %t5
+  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
+  %t9 = mul nsw <2 x i64> %t8, %t4 ; signed
+  %a10 = add nsw <2 x i64> %t9, %a1 ; signed
+  ret <2 x i64> %a10
+}
+
+define <2 x i64> @vec128_i64_signed_mem_mem(<2 x i64>* %a1_addr, <2 x i64>* %a2_addr) nounwind {
+; SSE2-LABEL: vec128_i64_signed_mem_mem:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa (%rdi), %xmm1
+; SSE2-NEXT:    movdqa (%rsi), %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [2147483648,2147483648]
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pxor %xmm4, %xmm5
+; SSE2-NEXT:    pxor %xmm1, %xmm4
+; SSE2-NEXT:    movdqa %xmm4, %xmm2
+; SSE2-NEXT:    pcmpgtd %xmm5, %xmm2
+; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm2[0,0,2,2]
+; SSE2-NEXT:    movdqa %xmm4, %xmm6
+; SSE2-NEXT:    pcmpeqd %xmm5, %xmm6
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE2-NEXT:    pand %xmm6, %xmm3
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; SSE2-NEXT:    por %xmm3, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [1,1]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    pcmpgtd %xmm4, %xmm5
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm5[0,0,2,2]
+; SSE2-NEXT:    pand %xmm6, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    pand %xmm5, %xmm4
+; SSE2-NEXT:    pandn %xmm0, %xmm5
+; SSE2-NEXT:    por %xmm4, %xmm5
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    pand %xmm2, %xmm4
+; SSE2-NEXT:    pandn %xmm0, %xmm2
+; SSE2-NEXT:    por %xmm4, %xmm2
+; SSE2-NEXT:    psubq %xmm5, %xmm2
+; SSE2-NEXT:    psrlq $1, %xmm2
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    psrlq $32, %xmm4
+; SSE2-NEXT:    pmuludq %xmm2, %xmm4
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    psrlq $32, %xmm0
+; SSE2-NEXT:    pmuludq %xmm3, %xmm0
+; SSE2-NEXT:    paddq %xmm4, %xmm0
+; SSE2-NEXT:    psllq $32, %xmm0
+; SSE2-NEXT:    pmuludq %xmm3, %xmm2
+; SSE2-NEXT:    paddq %xmm1, %xmm0
+; SSE2-NEXT:    paddq %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i64_signed_mem_mem:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa (%rdi), %xmm3
+; SSE41-NEXT:    movdqa (%rsi), %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [2147483648,2147483648]
+; SSE41-NEXT:    movdqa %xmm2, %xmm5
+; SSE41-NEXT:    pxor %xmm0, %xmm5
+; SSE41-NEXT:    pxor %xmm3, %xmm0
+; SSE41-NEXT:    movdqa %xmm0, %xmm1
+; SSE41-NEXT:    pcmpgtd %xmm5, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[0,0,2,2]
+; SSE41-NEXT:    movdqa %xmm0, %xmm6
+; SSE41-NEXT:    pcmpeqd %xmm5, %xmm6
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
+; SSE41-NEXT:    pand %xmm6, %xmm4
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; SSE41-NEXT:    por %xmm4, %xmm1
+; SSE41-NEXT:    movdqa {{.*#+}} xmm4 = [1,1]
+; SSE41-NEXT:    por %xmm1, %xmm4
+; SSE41-NEXT:    pcmpgtd %xmm0, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm5[0,0,2,2]
+; SSE41-NEXT:    pand %xmm6, %xmm0
+; SSE41-NEXT:    por %xmm5, %xmm0
+; SSE41-NEXT:    movdqa %xmm2, %xmm5
+; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm5
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    blendvpd %xmm0, %xmm3, %xmm2
+; SSE41-NEXT:    psubq %xmm5, %xmm2
+; SSE41-NEXT:    psrlq $1, %xmm2
+; SSE41-NEXT:    movdqa %xmm4, %xmm1
+; SSE41-NEXT:    psrlq $32, %xmm1
+; SSE41-NEXT:    pmuludq %xmm2, %xmm1
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
+; SSE41-NEXT:    psrlq $32, %xmm0
+; SSE41-NEXT:    pmuludq %xmm4, %xmm0
+; SSE41-NEXT:    paddq %xmm1, %xmm0
+; SSE41-NEXT:    psllq $32, %xmm0
+; SSE41-NEXT:    pmuludq %xmm4, %xmm2
+; SSE41-NEXT:    paddq %xmm3, %xmm0
+; SSE41-NEXT:    paddq %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i64_signed_mem_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i64_signed_mem_mem:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm3
+; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm4
+; AVX2-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX2-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-LABEL: vec128_i64_signed_mem_mem:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovdqa (%rdi), %xmm0
+; XOP-NEXT:    vmovdqa (%rsi), %xmm1
+; XOP-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm2
+; XOP-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm3
+; XOP-NEXT:    vpcomltq %xmm1, %xmm0, %xmm4
+; XOP-NEXT:    vblendvpd %xmm4, %xmm0, %xmm1, %xmm4
+; XOP-NEXT:    vblendvpd %xmm2, %xmm0, %xmm1, %xmm1
+; XOP-NEXT:    vpsubq %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; XOP-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; XOP-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; XOP-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; XOP-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpsllq $32, %xmm2, %xmm2
+; XOP-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; XOP-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i64_signed_mem_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512F-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512F-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; AVX512F-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; AVX512F-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlq $32, %xmm1, %xmm2
+; AVX512F-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX512F-NEXT:    vpmuludq %xmm4, %xmm1, %xmm4
+; AVX512F-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; AVX512F-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512F-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX512F-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec128_i64_signed_mem_mem:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512VL-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512VL-NEXT:    vpcmpgtq %xmm1, %xmm0, %k1
+; AVX512VL-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512VL-NEXT:    vmovdqa64 %xmm2, %xmm3 {%k1}
+; AVX512VL-NEXT:    vpminsq %xmm1, %xmm0, %xmm2
+; AVX512VL-NEXT:    vpmaxsq %xmm1, %xmm0, %xmm1
+; AVX512VL-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpsrlq $32, %xmm3, %xmm2
+; AVX512VL-NEXT:    vpmuludq %xmm2, %xmm1, %xmm2
+; AVX512VL-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; AVX512VL-NEXT:    vpmuludq %xmm3, %xmm4, %xmm4
+; AVX512VL-NEXT:    vpaddq %xmm4, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX512VL-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX512VL-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i64_signed_mem_mem:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubq %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm4
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm2, %xmm4, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+  %a1 = load <2 x i64>, <2 x i64>* %a1_addr
+  %a2 = load <2 x i64>, <2 x i64>* %a2_addr
+  %t3 = icmp sgt <2 x i64> %a1, %a2 ; signed
+  %t4 = select <2 x i1> %t3, <2 x i64> <i64 -1, i64 -1>, <2 x i64> <i64 1, i64 1>
+  %t5 = select <2 x i1> %t3, <2 x i64> %a2, <2 x i64> %a1
+  %t6 = select <2 x i1> %t3, <2 x i64> %a1, <2 x i64> %a2
+  %t7 = sub <2 x i64> %t6, %t5
+  %t8 = lshr <2 x i64> %t7, <i64 1, i64 1>
+  %t9 = mul nsw <2 x i64> %t8, %t4 ; signed
+  %a10 = add nsw <2 x i64> %t9, %a1 ; signed
+  ret <2 x i64> %a10
+}
+
+; ---------------------------------------------------------------------------- ;
+; 16-bit width. 128 / 16 = 8 elts.
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define <8 x i16> @vec128_i16_signed_reg_reg(<8 x i16> %a1, <8 x i16> %a2) nounwind {
+; SSE-LABEL: vec128_i16_signed_reg_reg:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pcmpgtw %xmm1, %xmm2
+; SSE-NEXT:    por {{.*}}(%rip), %xmm2
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    pminsw %xmm1, %xmm3
+; SSE-NEXT:    pmaxsw %xmm0, %xmm1
+; SSE-NEXT:    psubw %xmm3, %xmm1
+; SSE-NEXT:    psrlw $1, %xmm1
+; SSE-NEXT:    pmullw %xmm1, %xmm2
+; SSE-NEXT:    paddw %xmm0, %xmm2
+; SSE-NEXT:    movdqa %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-LABEL: vec128_i16_signed_reg_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm2
+; XOP-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; XOP-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-NEXT:    vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i16_signed_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX512F-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX512F-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtw %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec128_i16_signed_reg_reg:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpcmpgtw %xmm1, %xmm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu16 %xmm2, %xmm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsw %xmm1, %xmm0, %xmm2
+; AVX512VLBW-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX512VLBW-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512VLBW-NEXT:    retq
+  %t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
+  %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
+  %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
+  %t7 = sub <8 x i16> %t6, %t5
+  %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul nsw <8 x i16> %t8, %t4 ; signed
+  %a10 = add nsw <8 x i16> %t9, %a1 ; signed
+  ret <8 x i16> %a10
+}
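+
+; As a rough scalar C++ sketch (the helper name is hypothetical, not part of
+; the test), the IR pattern above corresponds, per element, to:
+;
+;   int16_t midpoint_like(int16_t a1, int16_t a2) {
+;     int16_t lo = a1 > a2 ? a2 : a1;                           // %t5
+;     int16_t hi = a1 > a2 ? a1 : a2;                           // %t6
+;     uint16_t half = uint16_t(hi - lo) >> 1;                   // %t7/%t8
+;     return int16_t(a1 + (a1 > a2 ? -1 : 1) * int16_t(half));  // %t4/%t9/%a10
+;   }
+;
+; i.e. a1 + sign * ((max - min) >> 1), with the subtraction and shift done on
+; the unsigned bit pattern, matching the wrapping sub plus lshr in the IR.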
+
+define <8 x i16> @vec128_i16_unsigned_reg_reg(<8 x i16> %a1, <8 x i16> %a2) nounwind {
+; SSE2-LABEL: vec128_i16_unsigned_reg_reg:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [32768,32768,32768,32768,32768,32768,32768,32768]
+; SSE2-NEXT:    pxor %xmm3, %xmm1
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pxor %xmm3, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pcmpgtw %xmm1, %xmm4
+; SSE2-NEXT:    por {{.*}}(%rip), %xmm4
+; SSE2-NEXT:    movdqa %xmm2, %xmm5
+; SSE2-NEXT:    pminsw %xmm1, %xmm5
+; SSE2-NEXT:    pxor %xmm3, %xmm5
+; SSE2-NEXT:    pmaxsw %xmm1, %xmm2
+; SSE2-NEXT:    pxor %xmm3, %xmm2
+; SSE2-NEXT:    psubw %xmm5, %xmm2
+; SSE2-NEXT:    psrlw $1, %xmm2
+; SSE2-NEXT:    pmullw %xmm4, %xmm2
+; SSE2-NEXT:    paddw %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i16_unsigned_reg_reg:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    pminuw %xmm1, %xmm2
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pcmpeqw %xmm2, %xmm3
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE41-NEXT:    pxor %xmm3, %xmm4
+; SSE41-NEXT:    por {{.*}}(%rip), %xmm4
+; SSE41-NEXT:    pmaxuw %xmm0, %xmm1
+; SSE41-NEXT:    psubw %xmm2, %xmm1
+; SSE41-NEXT:    psrlw $1, %xmm1
+; SSE41-NEXT:    pmullw %xmm1, %xmm4
+; SSE41-NEXT:    paddw %xmm4, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vpminuw %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vpminuw %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm3
+; AVX2-FALLBACK-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX2-FALLBACK-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm3, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-LABEL: vec128_i16_unsigned_reg_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpcomgtuw %xmm1, %xmm0, %xmm2
+; XOP-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-NEXT:    vpminuw %xmm1, %xmm0, %xmm3
+; XOP-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-NEXT:    vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i16_unsigned_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpminuw %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm3
+; AVX512F-NEXT:    vpternlogq $15, %zmm3, %zmm3, %zmm3
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %xmm3, %xmm3
+; AVX512F-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
+; AVX512F-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vpminuw %xmm1, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm3
+; AVX512VL-FALLBACK-NEXT:    vpternlogq $15, %xmm3, %xmm3, %xmm3
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm3, %xmm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vpcmpnleuw %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminuw %xmm1, %xmm0, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec128_i16_unsigned_reg_reg:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpcmpnleuw %xmm1, %xmm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu16 %xmm2, %xmm3 {%k1}
+; AVX512VLBW-NEXT:    vpminuw %xmm1, %xmm0, %xmm2
+; AVX512VLBW-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
+; AVX512VLBW-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512VLBW-NEXT:    retq
+  %t3 = icmp ugt <8 x i16> %a1, %a2
+  %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
+  %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
+  %t7 = sub <8 x i16> %t6, %t5
+  %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul <8 x i16> %t8, %t4
+  %a10 = add <8 x i16> %t9, %a1
+  ret <8 x i16> %a10
+}
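+
+; Note: pre-AVX512 targets have no unsigned word compare, so the unsigned
+; variant is synthesized either by flipping the sign bits and reusing the
+; signed ops (SSE2: pxor with 0x8000) or via pminuw + pcmpeqw + pxor
+; (SSE4.1/AVX); AVX512BW can use vpcmpnleuw directly.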
+
+; Values are loaded. Only check signed case.
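+; The mem_reg / reg_mem / mem_mem variants below differ only in which operands
+; are loaded from memory, so they mainly exercise how the pattern interacts
+; with load folding; the midpoint computation itself is unchanged.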
+
+define <8 x i16> @vec128_i16_signed_mem_reg(<8 x i16>* %a1_addr, <8 x i16> %a2) nounwind {
+; SSE-LABEL: vec128_i16_signed_mem_reg:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa (%rdi), %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    pcmpgtw %xmm0, %xmm2
+; SSE-NEXT:    por {{.*}}(%rip), %xmm2
+; SSE-NEXT:    movdqa %xmm1, %xmm3
+; SSE-NEXT:    pminsw %xmm0, %xmm3
+; SSE-NEXT:    pmaxsw %xmm1, %xmm0
+; SSE-NEXT:    psubw %xmm3, %xmm0
+; SSE-NEXT:    psrlw $1, %xmm0
+; SSE-NEXT:    pmullw %xmm2, %xmm0
+; SSE-NEXT:    paddw %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpminsw %xmm0, %xmm1, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm3, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-FALLBACK-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm2
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpminsw %xmm0, %xmm1, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxsw %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    vpsubw %xmm3, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-LABEL: vec128_i16_signed_mem_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-NEXT:    vpcomgtw %xmm0, %xmm1, %xmm2
+; XOP-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-NEXT:    vpminsw %xmm0, %xmm1, %xmm3
+; XOP-NEXT:    vpmaxsw %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    vpsubw %xmm3, %xmm0, %xmm0
+; XOP-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; XOP-NEXT:    vpmacsww %xmm1, %xmm2, %xmm0, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i16_signed_mem_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512F-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpminsw %xmm0, %xmm1, %xmm3
+; AVX512F-NEXT:    vpmaxsw %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vpsubw %xmm3, %xmm0, %xmm0
+; AVX512F-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX512F-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; AVX512F-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpminsw %xmm0, %xmm1, %xmm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %xmm0, %xmm1, %xmm0
+; AVX512VL-FALLBACK-NEXT:    vpsubw %xmm3, %xmm0, %xmm0
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX512VL-FALLBACK-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; AVX512VL-FALLBACK-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtw %zmm0, %zmm1, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsw %xmm0, %xmm1, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsw %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpsubw %xmm2, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpmullw %xmm3, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec128_i16_signed_mem_reg:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512VLBW-NEXT:    vpcmpgtw %xmm0, %xmm1, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu16 %xmm2, %xmm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsw %xmm0, %xmm1, %xmm2
+; AVX512VLBW-NEXT:    vpmaxsw %xmm0, %xmm1, %xmm0
+; AVX512VLBW-NEXT:    vpsubw %xmm2, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpmullw %xmm3, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    retq
+  %a1 = load <8 x i16>, <8 x i16>* %a1_addr
+  %t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
+  %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
+  %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
+  %t7 = sub <8 x i16> %t6, %t5
+  %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul nsw <8 x i16> %t8, %t4 ; signed
+  %a10 = add nsw <8 x i16> %t9, %a1 ; signed
+  ret <8 x i16> %a10
+}
+
+define <8 x i16> @vec128_i16_signed_reg_mem(<8 x i16> %a1, <8 x i16>* %a2_addr) nounwind {
+; SSE-LABEL: vec128_i16_signed_reg_mem:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa (%rdi), %xmm2
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    pcmpgtw %xmm2, %xmm1
+; SSE-NEXT:    por {{.*}}(%rip), %xmm1
+; SSE-NEXT:    movdqa %xmm0, %xmm3
+; SSE-NEXT:    pminsw %xmm2, %xmm3
+; SSE-NEXT:    pmaxsw %xmm0, %xmm2
+; SSE-NEXT:    psubw %xmm3, %xmm2
+; SSE-NEXT:    psrlw $1, %xmm2
+; SSE-NEXT:    pmullw %xmm2, %xmm1
+; SSE-NEXT:    paddw %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-LABEL: vec128_i16_signed_reg_mem:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm2
+; XOP-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; XOP-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-NEXT:    vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i16_signed_reg_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512F-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX512F-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX512F-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtw %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec128_i16_signed_reg_mem:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512VLBW-NEXT:    vpcmpgtw %xmm1, %xmm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu16 %xmm2, %xmm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsw %xmm1, %xmm0, %xmm2
+; AVX512VLBW-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX512VLBW-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512VLBW-NEXT:    retq
+  %a2 = load <8 x i16>, <8 x i16>* %a2_addr
+  %t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
+  %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
+  %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
+  %t7 = sub <8 x i16> %t6, %t5
+  %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul nsw <8 x i16> %t8, %t4 ; signed
+  %a10 = add nsw <8 x i16> %t9, %a1 ; signed
+  ret <8 x i16> %a10
+}
+
+define <8 x i16> @vec128_i16_signed_mem_mem(<8 x i16>* %a1_addr, <8 x i16>* %a2_addr) nounwind {
+; SSE-LABEL: vec128_i16_signed_mem_mem:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa (%rdi), %xmm1
+; SSE-NEXT:    movdqa (%rsi), %xmm0
+; SSE-NEXT:    movdqa %xmm1, %xmm2
+; SSE-NEXT:    pcmpgtw %xmm0, %xmm2
+; SSE-NEXT:    por {{.*}}(%rip), %xmm2
+; SSE-NEXT:    movdqa %xmm1, %xmm3
+; SSE-NEXT:    pminsw %xmm0, %xmm3
+; SSE-NEXT:    pmaxsw %xmm1, %xmm0
+; SSE-NEXT:    psubw %xmm3, %xmm0
+; SSE-NEXT:    psrlw $1, %xmm0
+; SSE-NEXT:    pmullw %xmm2, %xmm0
+; SSE-NEXT:    paddw %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX2-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-LABEL: vec128_i16_signed_mem_mem:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovdqa (%rdi), %xmm0
+; XOP-NEXT:    vmovdqa (%rsi), %xmm1
+; XOP-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm2
+; XOP-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; XOP-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-NEXT:    vpmacsww %xmm0, %xmm2, %xmm1, %xmm0
+; XOP-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i16_signed_mem_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512F-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512F-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX512F-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX512F-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtw %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec128_i16_signed_mem_mem:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512VLBW-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512VLBW-NEXT:    vpcmpgtw %xmm1, %xmm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu16 %xmm2, %xmm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsw %xmm1, %xmm0, %xmm2
+; AVX512VLBW-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX512VLBW-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX512VLBW-NEXT:    retq
+  %a1 = load <8 x i16>, <8 x i16>* %a1_addr
+  %a2 = load <8 x i16>, <8 x i16>* %a2_addr
+  %t3 = icmp sgt <8 x i16> %a1, %a2 ; signed
+  %t4 = select <8 x i1> %t3, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <8 x i1> %t3, <8 x i16> %a2, <8 x i16> %a1
+  %t6 = select <8 x i1> %t3, <8 x i16> %a1, <8 x i16> %a2
+  %t7 = sub <8 x i16> %t6, %t5
+  %t8 = lshr <8 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul nsw <8 x i16> %t8, %t4 ; signed
+  %a10 = add nsw <8 x i16> %t9, %a1 ; signed
+  ret <8 x i16> %a10
+}
+
+; ---------------------------------------------------------------------------- ;
+; 8-bit width. 128 / 8 = 16 elts.
+; ---------------------------------------------------------------------------- ;
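+
+; Note: x86 has no vector byte multiply, so the i8 codegen below widens the
+; halved difference and the +1/-1 multiplier to i16 (punpck*/pmovzxbw),
+; multiplies with pmullw, and narrows back (packuswb, or vpmovwb on AVX512BW).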
+
+; Values come from regs
+
+define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwind {
+; SSE2-LABEL: vec128_i8_signed_reg_reg:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; SSE2-NEXT:    por %xmm2, %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    pcmpgtb %xmm0, %xmm4
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pand %xmm4, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm4
+; SSE2-NEXT:    por %xmm5, %xmm4
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pand %xmm2, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm2
+; SSE2-NEXT:    por %xmm5, %xmm2
+; SSE2-NEXT:    psubb %xmm4, %xmm2
+; SSE2-NEXT:    psrlw $1, %xmm2
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm1
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; SSE2-NEXT:    pmullw %xmm1, %xmm4
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm1, %xmm4
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT:    pmullw %xmm3, %xmm2
+; SSE2-NEXT:    pand %xmm1, %xmm2
+; SSE2-NEXT:    packuswb %xmm4, %xmm2
+; SSE2-NEXT:    paddb %xmm0, %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i8_signed_reg_reg:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    pcmpgtb %xmm1, %xmm2
+; SSE41-NEXT:    por {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pminsb %xmm1, %xmm3
+; SSE41-NEXT:    pmaxsb %xmm0, %xmm1
+; SSE41-NEXT:    psubb %xmm3, %xmm1
+; SSE41-NEXT:    psrlw $1, %xmm1
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE41-NEXT:    pmullw %xmm1, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT:    pand %xmm1, %xmm2
+; SSE41-NEXT:    pmullw %xmm4, %xmm3
+; SSE41-NEXT:    pand %xmm1, %xmm3
+; SSE41-NEXT:    packuswb %xmm2, %xmm3
+; SSE41-NEXT:    paddb %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-FALLBACK-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-FALLBACK-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    vzeroupper
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm2
+; XOP-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
+; XOP-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec128_i8_signed_reg_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
+; XOPAVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: vec128_i8_signed_reg_reg:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm2
+; XOPAVX2-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; XOPAVX2-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; XOPAVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOPAVX2-NEXT:    vzeroupper
+; XOPAVX2-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i8_signed_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX512F-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX512F-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512VL-FALLBACK-NEXT:    vzeroupper
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtb %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512BW-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec128_i8_signed_reg_reg:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpcmpgtb %xmm1, %xmm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu8 %xmm2, %xmm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsb %xmm1, %xmm0, %xmm2
+; AVX512VLBW-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX512VLBW-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512VLBW-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmovwb %ymm1, %xmm1
+; AVX512VLBW-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+  %t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
+  %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
+  %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
+  %t7 = sub <16 x i8> %t6, %t5
+  %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul nsw <16 x i8> %t8, %t4 ; signed
+  %a10 = add nsw <16 x i8> %t9, %a1 ; signed
+  ret <16 x i8> %a10
+}
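+
+; Note: on XOP the psrlw + pand halving is replaced by a single vpshlb with an
+; all-ones shift-count vector (vpcmpeqd): vpshlb takes per-byte signed shift
+; counts, and a count of -1 shifts each byte right by one logically, so no
+; mask is needed to clear bits shifted across byte boundaries.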
+
+define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwind {
+; SSE2-LABEL: vec128_i8_unsigned_reg_reg:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm3
+; SSE2-NEXT:    pminub %xmm1, %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    pcmpeqb %xmm3, %xmm4
+; SSE2-NEXT:    pcmpeqd %xmm2, %xmm2
+; SSE2-NEXT:    pxor %xmm4, %xmm2
+; SSE2-NEXT:    por {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    pmaxub %xmm0, %xmm1
+; SSE2-NEXT:    psubb %xmm3, %xmm1
+; SSE2-NEXT:    psrlw $1, %xmm1
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; SSE2-NEXT:    pmullw %xmm3, %xmm4
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm3, %xmm4
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT:    pmullw %xmm1, %xmm2
+; SSE2-NEXT:    pand %xmm3, %xmm2
+; SSE2-NEXT:    packuswb %xmm4, %xmm2
+; SSE2-NEXT:    paddb %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i8_unsigned_reg_reg:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    pminub %xmm1, %xmm2
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pcmpeqb %xmm2, %xmm3
+; SSE41-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE41-NEXT:    pxor %xmm3, %xmm4
+; SSE41-NEXT:    por {{.*}}(%rip), %xmm4
+; SSE41-NEXT:    pmaxub %xmm0, %xmm1
+; SSE41-NEXT:    psubb %xmm2, %xmm1
+; SSE41-NEXT:    psrlw $1, %xmm1
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm3 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; SSE41-NEXT:    pmullw %xmm1, %xmm4
+; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT:    pand %xmm1, %xmm4
+; SSE41-NEXT:    pmullw %xmm3, %xmm2
+; SSE41-NEXT:    pand %xmm1, %xmm2
+; SSE41-NEXT:    packuswb %xmm4, %xmm2
+; SSE41-NEXT:    paddb %xmm2, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vpminub %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vpminub %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm3
+; AVX2-FALLBACK-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX2-FALLBACK-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm3, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX2-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-FALLBACK-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-FALLBACK-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    vzeroupper
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vpcomgtub %xmm1, %xmm0, %xmm2
+; XOP-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpminub %xmm1, %xmm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
+; XOP-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec128_i8_unsigned_reg_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpcomgtub %xmm1, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
+; XOPAVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: vec128_i8_unsigned_reg_reg:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vpcomgtub %xmm1, %xmm0, %xmm2
+; XOPAVX2-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; XOPAVX2-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; XOPAVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOPAVX2-NEXT:    vzeroupper
+; XOPAVX2-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i8_unsigned_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpminub %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm3
+; AVX512F-NEXT:    vpternlogq $15, %zmm3, %zmm3, %zmm3
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %xmm3, %xmm3
+; AVX512F-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; AVX512F-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vpminub %xmm1, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm3
+; AVX512VL-FALLBACK-NEXT:    vpternlogq $15, %xmm3, %xmm3, %xmm3
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm3, %xmm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512VL-FALLBACK-NEXT:    vzeroupper
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vpcmpnleub %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminub %xmm1, %xmm0, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512BW-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec128_i8_unsigned_reg_reg:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpcmpnleub %xmm1, %xmm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu8 %xmm2, %xmm3 {%k1}
+; AVX512VLBW-NEXT:    vpminub %xmm1, %xmm0, %xmm2
+; AVX512VLBW-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; AVX512VLBW-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512VLBW-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmovwb %ymm1, %xmm1
+; AVX512VLBW-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
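+  ; On the AVX512BW paths above, the -1/+1 select lowers to an unsigned
+  ; compare into the %k1 mask register plus a masked byte move (vmovdqu8)
+  ; merging -1 bytes into a vector of ones wherever the mask is set.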
+  %t3 = icmp ugt <16 x i8> %a1, %a2
+  %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
+  %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
+  %t7 = sub <16 x i8> %t6, %t5
+  %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul <16 x i8> %t8, %t4
+  %a10 = add <16 x i8> %t9, %a1
+  ret <16 x i8> %a10
+}
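+
+; Note: x86 has no vector i8 multiply, so the multiply by the -1/+1 vector
+; in the i8 tests is lowered by widening the bytes to words (punpck* or
+; vpmovzxbw), multiplying with pmullw/vpmullw, and truncating back to bytes
+; (packuswb, or vpmovwb/vpmovdb on the AVX512 paths).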
+
+; Values are loaded. Only check signed case.
+
+define <16 x i8> @vec128_i8_signed_mem_reg(<16 x i8>* %a1_addr, <16 x i8> %a2) nounwind {
+; SSE2-LABEL: vec128_i8_signed_mem_reg:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    movdqa (%rdi), %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-NEXT:    pcmpgtb %xmm0, %xmm3
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; SSE2-NEXT:    por %xmm3, %xmm0
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    pcmpgtb %xmm2, %xmm4
+; SSE2-NEXT:    movdqa %xmm2, %xmm5
+; SSE2-NEXT:    pand %xmm4, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm4
+; SSE2-NEXT:    por %xmm5, %xmm4
+; SSE2-NEXT:    movdqa %xmm2, %xmm5
+; SSE2-NEXT:    pand %xmm3, %xmm5
+; SSE2-NEXT:    pandn %xmm1, %xmm3
+; SSE2-NEXT:    por %xmm5, %xmm3
+; SSE2-NEXT:    psubb %xmm4, %xmm3
+; SSE2-NEXT:    psrlw $1, %xmm3
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm3
+; SSE2-NEXT:    movdqa %xmm3, %xmm1
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; SSE2-NEXT:    pmullw %xmm1, %xmm4
+; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm1, %xmm4
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pmullw %xmm3, %xmm0
+; SSE2-NEXT:    pand %xmm1, %xmm0
+; SSE2-NEXT:    packuswb %xmm4, %xmm0
+; SSE2-NEXT:    paddb %xmm2, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i8_signed_mem_reg:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa (%rdi), %xmm2
+; SSE41-NEXT:    movdqa %xmm2, %xmm3
+; SSE41-NEXT:    pcmpgtb %xmm0, %xmm3
+; SSE41-NEXT:    por {{.*}}(%rip), %xmm3
+; SSE41-NEXT:    movdqa %xmm2, %xmm1
+; SSE41-NEXT:    pminsb %xmm0, %xmm1
+; SSE41-NEXT:    pmaxsb %xmm2, %xmm0
+; SSE41-NEXT:    psubb %xmm1, %xmm0
+; SSE41-NEXT:    psrlw $1, %xmm0
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE41-NEXT:    pmullw %xmm0, %xmm3
+; SSE41-NEXT:    movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT:    pand %xmm0, %xmm3
+; SSE41-NEXT:    pmullw %xmm4, %xmm1
+; SSE41-NEXT:    pand %xmm0, %xmm1
+; SSE41-NEXT:    packuswb %xmm3, %xmm1
+; SSE41-NEXT:    paddb %xmm2, %xmm1
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpminsb %xmm0, %xmm1, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm3, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-FALLBACK-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm2
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpminsb %xmm0, %xmm1, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    vpsubb %xmm3, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-FALLBACK-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX2-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-FALLBACK-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-FALLBACK-NEXT:    vzeroupper
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-FALLBACK-NEXT:    vpcomgtb %xmm0, %xmm1, %xmm2
+; XOP-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpminsb %xmm0, %xmm1, %xmm3
+; XOP-FALLBACK-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vpsubb %xmm3, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpshlb %xmm3, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpperm {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
+; XOP-FALLBACK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec128_i8_signed_mem_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX1-NEXT:    vpcomgtb %xmm0, %xmm1, %xmm2
+; XOPAVX1-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpminsb %xmm0, %xmm1, %xmm3
+; XOPAVX1-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vpsubb %xmm3, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpperm {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
+; XOPAVX1-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: vec128_i8_signed_mem_reg:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX2-NEXT:    vpcomgtb %xmm0, %xmm1, %xmm2
+; XOPAVX2-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpminsb %xmm0, %xmm1, %xmm3
+; XOPAVX2-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; XOPAVX2-NEXT:    vpsubb %xmm3, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpshlb %xmm3, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; XOPAVX2-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; XOPAVX2-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; XOPAVX2-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT:    vzeroupper
+; XOPAVX2-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i8_signed_mem_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512F-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpminsb %xmm0, %xmm1, %xmm3
+; AVX512F-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vpsubb %xmm3, %xmm0, %xmm0
+; AVX512F-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512F-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512F-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpminsb %xmm0, %xmm1, %xmm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; AVX512VL-FALLBACK-NEXT:    vpsubb %xmm3, %xmm0, %xmm0
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX512VL-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512VL-FALLBACK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX512VL-FALLBACK-NEXT:    vzeroupper
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtb %zmm0, %zmm1, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsb %xmm0, %xmm1, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpsubb %xmm2, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512BW-FALLBACK-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec128_i8_signed_mem_reg:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512VLBW-NEXT:    vpcmpgtb %xmm0, %xmm1, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu8 %xmm2, %xmm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsb %xmm0, %xmm1, %xmm2
+; AVX512VLBW-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; AVX512VLBW-NEXT:    vpsubb %xmm2, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512VLBW-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpmovwb %ymm0, %xmm0
+; AVX512VLBW-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+  %a1 = load <16 x i8>, <16 x i8>* %a1_addr
+  %t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
+  %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
+  %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
+  %t7 = sub <16 x i8> %t6, %t5
+  %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul nsw <16 x i8> %t8, %t4 ; signed
+  %a10 = add nsw <16 x i8> %t9, %a1 ; signed
+  ret <16 x i8> %a10
+}
+
+define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, <16 x i8>* %a2_addr) nounwind {
+; SSE2-LABEL: vec128_i8_signed_reg_mem:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa (%rdi), %xmm3
+; SSE2-NEXT:    movdqa %xmm0, %xmm1
+; SSE2-NEXT:    pcmpgtb %xmm3, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; SSE2-NEXT:    por %xmm1, %xmm2
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    pcmpgtb %xmm0, %xmm4
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pand %xmm4, %xmm5
+; SSE2-NEXT:    pandn %xmm3, %xmm4
+; SSE2-NEXT:    por %xmm5, %xmm4
+; SSE2-NEXT:    movdqa %xmm0, %xmm5
+; SSE2-NEXT:    pand %xmm1, %xmm5
+; SSE2-NEXT:    pandn %xmm3, %xmm1
+; SSE2-NEXT:    por %xmm5, %xmm1
+; SSE2-NEXT:    psubb %xmm4, %xmm1
+; SSE2-NEXT:    psrlw $1, %xmm1
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm3
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; SSE2-NEXT:    pmullw %xmm3, %xmm4
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm3, %xmm4
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT:    pmullw %xmm2, %xmm1
+; SSE2-NEXT:    pand %xmm3, %xmm1
+; SSE2-NEXT:    packuswb %xmm4, %xmm1
+; SSE2-NEXT:    paddb %xmm0, %xmm1
+; SSE2-NEXT:    movdqa %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i8_signed_reg_mem:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa (%rdi), %xmm1
+; SSE41-NEXT:    movdqa %xmm0, %xmm2
+; SSE41-NEXT:    pcmpgtb %xmm1, %xmm2
+; SSE41-NEXT:    por {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    movdqa %xmm0, %xmm3
+; SSE41-NEXT:    pminsb %xmm1, %xmm3
+; SSE41-NEXT:    pmaxsb %xmm0, %xmm1
+; SSE41-NEXT:    psubb %xmm3, %xmm1
+; SSE41-NEXT:    psrlw $1, %xmm1
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE41-NEXT:    pmullw %xmm1, %xmm2
+; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT:    pand %xmm1, %xmm2
+; SSE41-NEXT:    pmullw %xmm4, %xmm3
+; SSE41-NEXT:    pand %xmm1, %xmm3
+; SSE41-NEXT:    packuswb %xmm2, %xmm3
+; SSE41-NEXT:    paddb %xmm3, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-FALLBACK-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-FALLBACK-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    vzeroupper
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-FALLBACK-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm2
+; XOP-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
+; XOP-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec128_i8_signed_reg_mem:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX1-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
+; XOPAVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: vec128_i8_signed_reg_mem:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX2-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm2
+; XOPAVX2-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; XOPAVX2-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; XOPAVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOPAVX2-NEXT:    vzeroupper
+; XOPAVX2-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i8_signed_reg_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512F-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX512F-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX512F-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512VL-FALLBACK-NEXT:    vzeroupper
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtb %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512BW-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec128_i8_signed_reg_mem:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX512VLBW-NEXT:    vpcmpgtb %xmm1, %xmm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu8 %xmm2, %xmm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsb %xmm1, %xmm0, %xmm2
+; AVX512VLBW-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX512VLBW-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512VLBW-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmovwb %ymm1, %xmm1
+; AVX512VLBW-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+  %a2 = load <16 x i8>, <16 x i8>* %a2_addr
+  %t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
+  %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
+  %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
+  %t7 = sub <16 x i8> %t6, %t5
+  %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul nsw <16 x i8> %t8, %t4 ; signed
+  %a10 = add nsw <16 x i8> %t9, %a1 ; signed
+  ret <16 x i8> %a10
+}
+
+define <16 x i8> @vec128_i8_signed_mem_mem(<16 x i8>* %a1_addr, <16 x i8>* %a2_addr) nounwind {
+; SSE2-LABEL: vec128_i8_signed_mem_mem:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movdqa (%rdi), %xmm1
+; SSE2-NEXT:    movdqa (%rsi), %xmm3
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    pcmpgtb %xmm3, %xmm2
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; SSE2-NEXT:    por %xmm2, %xmm0
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    pcmpgtb %xmm1, %xmm4
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    pand %xmm4, %xmm5
+; SSE2-NEXT:    pandn %xmm3, %xmm4
+; SSE2-NEXT:    por %xmm5, %xmm4
+; SSE2-NEXT:    movdqa %xmm1, %xmm5
+; SSE2-NEXT:    pand %xmm2, %xmm5
+; SSE2-NEXT:    pandn %xmm3, %xmm2
+; SSE2-NEXT:    por %xmm5, %xmm2
+; SSE2-NEXT:    psubb %xmm4, %xmm2
+; SSE2-NEXT:    psrlw $1, %xmm2
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE2-NEXT:    movdqa %xmm2, %xmm3
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-NEXT:    movdqa %xmm0, %xmm4
+; SSE2-NEXT:    punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; SSE2-NEXT:    pmullw %xmm3, %xmm4
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm3, %xmm4
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    pmullw %xmm2, %xmm0
+; SSE2-NEXT:    pand %xmm3, %xmm0
+; SSE2-NEXT:    packuswb %xmm4, %xmm0
+; SSE2-NEXT:    paddb %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: vec128_i8_signed_mem_mem:
+; SSE41:       # %bb.0:
+; SSE41-NEXT:    movdqa (%rdi), %xmm1
+; SSE41-NEXT:    movdqa (%rsi), %xmm2
+; SSE41-NEXT:    movdqa %xmm1, %xmm3
+; SSE41-NEXT:    pcmpgtb %xmm2, %xmm3
+; SSE41-NEXT:    por {{.*}}(%rip), %xmm3
+; SSE41-NEXT:    movdqa %xmm1, %xmm0
+; SSE41-NEXT:    pminsb %xmm2, %xmm0
+; SSE41-NEXT:    pmaxsb %xmm1, %xmm2
+; SSE41-NEXT:    psubb %xmm0, %xmm2
+; SSE41-NEXT:    psrlw $1, %xmm2
+; SSE41-NEXT:    pand {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE41-NEXT:    pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; SSE41-NEXT:    punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE41-NEXT:    pmullw %xmm2, %xmm3
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT:    pand %xmm2, %xmm3
+; SSE41-NEXT:    pmullw %xmm4, %xmm0
+; SSE41-NEXT:    pand %xmm2, %xmm0
+; SSE41-NEXT:    packuswb %xmm3, %xmm0
+; SSE41-NEXT:    paddb %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX1-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
+; AVX2-FALLBACK:       # %bb.0:
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX2-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX2-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX2-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-FALLBACK-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-FALLBACK-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX2-FALLBACK-NEXT:    vzeroupper
+; AVX2-FALLBACK-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; XOP-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; XOP-FALLBACK-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm2
+; XOP-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
+; XOP-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec128_i8_signed_mem_mem:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm0
+; XOPAVX1-NEXT:    vmovdqa (%rsi), %xmm1
+; XOPAVX1-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpperm {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14],xmm3[0,2,4,6,8,10,12,14]
+; XOPAVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    retq
+;
+; XOPAVX2-LABEL: vec128_i8_signed_mem_mem:
+; XOPAVX2:       # %bb.0:
+; XOPAVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; XOPAVX2-NEXT:    vmovdqa (%rsi), %xmm1
+; XOPAVX2-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm2
+; XOPAVX2-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; XOPAVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; XOPAVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOPAVX2-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT:    vpshlb %xmm3, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; XOPAVX2-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; XOPAVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; XOPAVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; XOPAVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; XOPAVX2-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOPAVX2-NEXT:    vzeroupper
+; XOPAVX2-NEXT:    retq
+;
+; AVX512F-LABEL: vec128_i8_signed_mem_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512F-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512F-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512F-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX512F-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX512F-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512F-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512F-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %xmm2, %xmm2
+; AVX512VL-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-FALLBACK-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512VL-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512VL-FALLBACK-NEXT:    vzeroupper
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtb %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512BW-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512BW-FALLBACK-NEXT:    vzeroupper
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec128_i8_signed_mem_mem:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512VLBW-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX512VLBW-NEXT:    vpcmpgtb %xmm1, %xmm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu8 %xmm2, %xmm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsb %xmm1, %xmm0, %xmm2
+; AVX512VLBW-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX512VLBW-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX512VLBW-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmovwb %ymm1, %xmm1
+; AVX512VLBW-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX512VLBW-NEXT:    vzeroupper
+; AVX512VLBW-NEXT:    retq
+  %a1 = load <16 x i8>, <16 x i8>* %a1_addr
+  %a2 = load <16 x i8>, <16 x i8>* %a2_addr
+  %t3 = icmp sgt <16 x i8> %a1, %a2 ; signed
+  %t4 = select <16 x i1> %t3, <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <16 x i1> %t3, <16 x i8> %a2, <16 x i8> %a1
+  %t6 = select <16 x i1> %t3, <16 x i8> %a1, <16 x i8> %a2
+  %t7 = sub <16 x i8> %t6, %t5
+  %t8 = lshr <16 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul nsw <16 x i8> %t8, %t4 ; signed
+  %a10 = add nsw <16 x i8> %t9, %a1 ; signed
+  ret <16 x i8> %a10
+}

Added: llvm/trunk/test/CodeGen/X86/midpoint-int-vec-256.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/midpoint-int-vec-256.ll?rev=355436&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/midpoint-int-vec-256.ll (added)
+++ llvm/trunk/test/CodeGen/X86/midpoint-int-vec-256.ll Tue Mar  5 12:18:47 2019
@@ -0,0 +1,3638 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX,AVX1,AVX1-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX2,AVX2-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop | FileCheck %s --check-prefixes=ALL,XOP,XOP-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=ALL,XOP,AVX,AVX1,XOPAVX,XOPAVX1,XOPAVX1-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=ALL,XOP,AVX,AVX2,XOPAVX,XOPAVX2,XOPAVX2-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=ALL,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VL,AVX512VL-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512,AVX512BW,AVX512BW-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VL,AVX512BW,AVX512VLBW
+
+; These test cases are inspired by C++2a std::midpoint().
+; See https://bugs.llvm.org/show_bug.cgi?id=40965
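+;
+; A rough scalar sketch of the pattern each test exercises (this mirrors the
+; IR below, not the exact library implementation of std::midpoint(); the
+; std::max/std::min spelling is just illustrative):
+;
+;   int midpoint(int a1, int a2) {
+;     int sign = a1 > a2 ? -1 : 1;                    // icmp + select -1/+1
+;     int diff = std::max(a1, a2) - std::min(a1, a2); // two more selects
+;     return (diff >> 1) * sign + a1;                 // lshr, mul, add
+;   }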
+
+; Using 256-bit vector regs.
+
+; ---------------------------------------------------------------------------- ;
+; 32-bit width. 256 / 32 = 8 elts.
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define <8 x i32> @vec256_i32_signed_reg_reg(<8 x i32> %a1, <8 x i32> %a2) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i32_signed_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpminsd %xmm2, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm5, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm2, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i32_signed_reg_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsubd %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpmulld %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i32_signed_reg_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm2
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOP-FALLBACK-NEXT:    vpminsd %xmm3, %xmm4, %xmm5
+; XOP-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmaxsd %xmm3, %xmm4, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubd %xmm5, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm4, %xmm2, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i32_signed_reg_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT:    vpminsd %xmm3, %xmm4, %xmm5
+; XOPAVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmaxsd %xmm3, %xmm4, %xmm2
+; XOPAVX1-NEXT:    vpsubd %xmm5, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrld $1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmacsdd %xmm4, %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512-LABEL: vec256_i32_signed_reg_reg:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpminsd %ymm1, %ymm0, %ymm2
+; AVX512-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm1
+; AVX512-NEXT:    vpsubd %ymm2, %ymm1, %ymm1
+; AVX512-NEXT:    vpsrld $1, %ymm1, %ymm1
+; AVX512-NEXT:    vpmulld %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+  %t3 = icmp sgt <8 x i32> %a1, %a2 ; signed
+  %t4 = select <8 x i1> %t3, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <8 x i1> %t3, <8 x i32> %a2, <8 x i32> %a1
+  %t6 = select <8 x i1> %t3, <8 x i32> %a1, <8 x i32> %a2
+  %t7 = sub <8 x i32> %t6, %t5
+  %t8 = lshr <8 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul nsw <8 x i32> %t8, %t8 ; signed
+  %a10 = add nsw <8 x i32> %t9, %a1 ; signed
+  ret <8 x i32> %a10
+}
+
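+; Note: on XOP targets the trailing multiply+add pair collapses into
+; vpmacsdd, whose per-lane behaviour is roughly (a sketch, not the ISA text):
+;
+;   int32_t vpmacsdd_lane(int32_t a, int32_t b, int32_t c) {
+;     return (int32_t)((int64_t)a * b + c); // low 32 bits of a*b, plus c
+;   }
+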
+define <8 x i32> @vec256_i32_unsigned_reg_reg(<8 x i32> %a1, <8 x i32> %a2) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i32_unsigned_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpminud %xmm2, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpminud %xmm1, %xmm0, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmaxud %xmm2, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm5, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm2, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i32_unsigned_reg_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpminud %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpmaxud %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsubd %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpmulld %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i32_unsigned_reg_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vpminud %xmm1, %xmm0, %xmm2
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOP-FALLBACK-NEXT:    vpminud %xmm3, %xmm4, %xmm5
+; XOP-FALLBACK-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmaxud %xmm3, %xmm4, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubd %xmm5, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm4, %xmm2, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i32_unsigned_reg_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT:    vpminud %xmm3, %xmm4, %xmm5
+; XOPAVX1-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmaxud %xmm3, %xmm4, %xmm2
+; XOPAVX1-NEXT:    vpsubd %xmm5, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrld $1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmacsdd %xmm4, %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512-LABEL: vec256_i32_unsigned_reg_reg:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpminud %ymm1, %ymm0, %ymm2
+; AVX512-NEXT:    vpmaxud %ymm1, %ymm0, %ymm1
+; AVX512-NEXT:    vpsubd %ymm2, %ymm1, %ymm1
+; AVX512-NEXT:    vpsrld $1, %ymm1, %ymm1
+; AVX512-NEXT:    vpmulld %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+  %t3 = icmp ugt <8 x i32> %a1, %a2
+  %t4 = select <8 x i1> %t3, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <8 x i1> %t3, <8 x i32> %a2, <8 x i32> %a1
+  %t6 = select <8 x i1> %t3, <8 x i32> %a1, <8 x i32> %a2
+  %t7 = sub <8 x i32> %t6, %t5
+  %t8 = lshr <8 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul <8 x i32> %t8, %t8
+  %a10 = add <8 x i32> %t9, %a1
+  ret <8 x i32> %a10
+}
+
+; Values are loaded. Only check signed case.
+
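+; (In the AVX1-class paths the 256-bit load is split into two 128-bit vmovdqa
+; loads, since the integer min/max instructions are only 128 bits wide there.)
+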
+define <8 x i32> @vec256_i32_signed_mem_reg(<8 x i32>* %a1_addr, <8 x i32> %a2) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i32_signed_mem_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-FALLBACK-NEXT:    vpminsd %xmm1, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpminsd %xmm0, %xmm2, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm5, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm0, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i32_signed_mem_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX2-NEXT:    vpminsd %ymm0, %ymm1, %ymm2
+; AVX2-NEXT:    vpmaxsd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpsubd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrld $1, %ymm0, %ymm0
+; AVX2-NEXT:    vpmulld %ymm0, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i32_signed_mem_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOP-FALLBACK-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOP-FALLBACK-NEXT:    vpminsd %xmm4, %xmm2, %xmm5
+; XOP-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpmaxsd %xmm4, %xmm2, %xmm3
+; XOP-FALLBACK-NEXT:    vpsubd %xmm5, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm2, %xmm3, %xmm3, %xmm2
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm1, %xmm0, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i32_signed_mem_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOPAVX1-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT:    vpminsd %xmm4, %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpmaxsd %xmm4, %xmm2, %xmm3
+; XOPAVX1-NEXT:    vpsubd %xmm5, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpsrld $1, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpmacsdd %xmm2, %xmm3, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vpmacsdd %xmm1, %xmm0, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512-LABEL: vec256_i32_signed_mem_reg:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512-NEXT:    vpminsd %ymm0, %ymm1, %ymm2
+; AVX512-NEXT:    vpmaxsd %ymm0, %ymm1, %ymm0
+; AVX512-NEXT:    vpsubd %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrld $1, %ymm0, %ymm0
+; AVX512-NEXT:    vpmulld %ymm0, %ymm0, %ymm0
+; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %a1 = load <8 x i32>, <8 x i32>* %a1_addr
+  %t3 = icmp sgt <8 x i32> %a1, %a2 ; signed
+  %t4 = select <8 x i1> %t3, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <8 x i1> %t3, <8 x i32> %a2, <8 x i32> %a1
+  %t6 = select <8 x i1> %t3, <8 x i32> %a1, <8 x i32> %a2
+  %t7 = sub <8 x i32> %t6, %t5
+  %t8 = lshr <8 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul nsw <8 x i32> %t8, %t8 ; signed
+  %a10 = add nsw <8 x i32> %t9, %a1 ; signed
+  ret <8 x i32> %a10
+}
+
+define <8 x i32> @vec256_i32_signed_reg_mem(<8 x i32> %a1, <8 x i32>* %a2_addr) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i32_signed_reg_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpminsd %xmm2, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm5, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm2, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i32_signed_reg_mem:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsubd %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpmulld %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i32_signed_reg_mem:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOP-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOP-FALLBACK-NEXT:    vpminsd %xmm2, %xmm4, %xmm5
+; XOP-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmaxsd %xmm2, %xmm4, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubd %xmm5, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm4, %xmm2, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i32_signed_reg_mem:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOPAVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT:    vpminsd %xmm2, %xmm4, %xmm5
+; XOPAVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmaxsd %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT:    vpsubd %xmm5, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrld $1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmacsdd %xmm4, %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512-LABEL: vec256_i32_signed_reg_mem:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512-NEXT:    vpminsd %ymm1, %ymm0, %ymm2
+; AVX512-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm1
+; AVX512-NEXT:    vpsubd %ymm2, %ymm1, %ymm1
+; AVX512-NEXT:    vpsrld $1, %ymm1, %ymm1
+; AVX512-NEXT:    vpmulld %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+  %a2 = load <8 x i32>, <8 x i32>* %a2_addr
+  %t3 = icmp sgt <8 x i32> %a1, %a2 ; signed
+  %t4 = select <8 x i1> %t3, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <8 x i1> %t3, <8 x i32> %a2, <8 x i32> %a1
+  %t6 = select <8 x i1> %t3, <8 x i32> %a1, <8 x i32> %a2
+  %t7 = sub <8 x i32> %t6, %t5
+  %t8 = lshr <8 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul nsw <8 x i32> %t8, %t8 ; signed
+  %a10 = add nsw <8 x i32> %t9, %a1 ; signed
+  ret <8 x i32> %a10
+}
+
+define <8 x i32> @vec256_i32_signed_mem_mem(<8 x i32>* %a1_addr, <8 x i32>* %a2_addr) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i32_signed_mem_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm1
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-FALLBACK-NEXT:    vpminsd %xmm1, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpminsd %xmm0, %xmm2, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsubd %xmm5, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm0, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpmulld %xmm1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i32_signed_mem_mem:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX2-NEXT:    vpminsd %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsubd %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrld $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpmulld %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i32_signed_mem_mem:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm0
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-FALLBACK-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
+; XOP-FALLBACK-NEXT:    vpminsd %xmm1, %xmm3, %xmm5
+; XOP-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
+; XOP-FALLBACK-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubd %xmm5, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm3, %xmm1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmacsdd %xmm2, %xmm0, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i32_signed_mem_mem:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovdqa (%rsi), %xmm0
+; XOPAVX1-NEXT:    vmovdqa 16(%rsi), %xmm1
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm2
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOPAVX1-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
+; XOPAVX1-NEXT:    vpminsd %xmm1, %xmm3, %xmm5
+; XOPAVX1-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
+; XOPAVX1-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT:    vpsubd %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmacsdd %xmm3, %xmm1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmacsdd %xmm2, %xmm0, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512-LABEL: vec256_i32_signed_mem_mem:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512-NEXT:    vpminsd %ymm1, %ymm0, %ymm2
+; AVX512-NEXT:    vpmaxsd %ymm1, %ymm0, %ymm1
+; AVX512-NEXT:    vpsubd %ymm2, %ymm1, %ymm1
+; AVX512-NEXT:    vpsrld $1, %ymm1, %ymm1
+; AVX512-NEXT:    vpmulld %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+  %a1 = load <8 x i32>, <8 x i32>* %a1_addr
+  %a2 = load <8 x i32>, <8 x i32>* %a2_addr
+  %t3 = icmp sgt <8 x i32> %a1, %a2 ; signed
+  %t4 = select <8 x i1> %t3, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <8 x i1> %t3, <8 x i32> %a2, <8 x i32> %a1
+  %t6 = select <8 x i1> %t3, <8 x i32> %a1, <8 x i32> %a2
+  %t7 = sub <8 x i32> %t6, %t5
+  %t8 = lshr <8 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul nsw <8 x i32> %t8, %t8 ; signed
+  %a10 = add nsw <8 x i32> %t9, %a1 ; signed
+  ret <8 x i32> %a10
+}
+
+; ---------------------------------------------------------------------------- ;
+; 64-bit width. 256 / 64 = 4 elts.
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
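+; None of the RUN lines enable AVX512DQ, so there is no 64-bit element vector
+; multiply available and %t9 is assembled from 32-bit vpmuludq partial
+; products. A scalar sketch of the decomposition:
+;
+;   uint64_t mul64(uint64_t x, uint64_t y) {
+;     uint64_t xl = x & 0xffffffff, xh = x >> 32;
+;     uint64_t yl = y & 0xffffffff, yh = y >> 32;
+;     // xh*yh shifts out entirely mod 2^64, so three products suffice.
+;     return xl * yl + ((xl * yh + xh * yl) << 32);
+;   }
+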
+define <4 x i64> @vec256_i64_signed_reg_reg(<4 x i64> %a1, <4 x i64> %a2) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i64_signed_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm5
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm6
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm3, %ymm7, %ymm3
+; AVX1-FALLBACK-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm3
+; AVX1-FALLBACK-NEXT:    vblendvpd %ymm6, %ymm0, %ymm1, %ymm1
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm6
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm6, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i64_signed_reg_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm3
+; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm4
+; AVX2-NEXT:    vblendvpd %ymm4, %ymm0, %ymm1, %ymm4
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubq %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlq $32, %ymm3, %ymm2
+; AVX2-NEXT:    vpmuludq %ymm2, %ymm1, %ymm2
+; AVX2-NEXT:    vpsrlq $32, %ymm1, %ymm4
+; AVX2-NEXT:    vpmuludq %ymm3, %ymm4, %ymm4
+; AVX2-NEXT:    vpaddq %ymm4, %ymm2, %ymm2
+; AVX2-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX2-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i64_signed_reg_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm3, %xmm2, %xmm4
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm6
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm3, %xmm2, %xmm3
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm1, %xmm0, %xmm7
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm3, %ymm7, %ymm3
+; XOP-FALLBACK-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm3
+; XOP-FALLBACK-NEXT:    vblendvpd %ymm6, %ymm0, %ymm1, %ymm1
+; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm6
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm6, %xmm3
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
+; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm5
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; XOP-FALLBACK-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
+; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i64_signed_reg_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT:    vpcomgtq %xmm3, %xmm2, %xmm4
+; XOPAVX1-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm6
+; XOPAVX1-NEXT:    vpcomltq %xmm3, %xmm2, %xmm3
+; XOPAVX1-NEXT:    vpcomltq %xmm1, %xmm0, %xmm7
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm7, %ymm3
+; XOPAVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm3
+; XOPAVX1-NEXT:    vblendvpd %ymm6, %ymm0, %ymm1, %ymm1
+; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm6
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm6, %xmm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOPAVX1-NEXT:    vpor %xmm8, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpsllq $32, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpor %xmm8, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm5
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm1, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; XOPAVX1-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpsllq $32, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
+; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
+; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i64_signed_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; AVX512F-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; AVX512F-NEXT:    vpsubq %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlq $32, %ymm1, %ymm2
+; AVX512F-NEXT:    vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlq $32, %ymm3, %ymm4
+; AVX512F-NEXT:    vpmuludq %ymm4, %ymm1, %ymm4
+; AVX512F-NEXT:    vpaddq %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec256_i64_signed_reg_reg:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpcmpgtq %ymm1, %ymm0, %k1
+; AVX512VL-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512VL-NEXT:    vmovdqa64 %ymm2, %ymm3 {%k1}
+; AVX512VL-NEXT:    vpminsq %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpmaxsq %ymm1, %ymm0, %ymm1
+; AVX512VL-NEXT:    vpsubq %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlq $32, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpmuludq %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpsrlq $32, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpmuludq %ymm3, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddq %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i64_signed_reg_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubq %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %ymm1, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %ymm3, %ymm4
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm4, %ymm1, %ymm4
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm2, %ymm4, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+  %t3 = icmp sgt <4 x i64> %a1, %a2 ; signed
+  %t4 = select <4 x i1> %t3, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> <i64 1, i64 1, i64 1, i64 1>
+  %t5 = select <4 x i1> %t3, <4 x i64> %a2, <4 x i64> %a1
+  %t6 = select <4 x i1> %t3, <4 x i64> %a1, <4 x i64> %a2
+  %t7 = sub <4 x i64> %t6, %t5
+  %t8 = lshr <4 x i64> %t7, <i64 1, i64 1, i64 1, i64 1>
+  %t9 = mul nsw <4 x i64> %t8, %t4 ; signed
+  %a10 = add nsw <4 x i64> %t9, %a1 ; signed
+  ret <4 x i64> %a10
+}
+
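+; Two lowering tricks show up below: AVX1/AVX2 have no unsigned 64-bit
+; compare, so both operands are biased by the sign bit and compared signed
+; (XOP has vpcomgtuq and AVX512 has vpcmpnleuq natively); and on AVX512 the
+; ±1 multiplier is built by merge-masking a broadcast 1 with all-ones lanes.
+; A scalar sketch of the biased compare:
+;
+;   bool ugt64(uint64_t a, uint64_t b) {
+;     const uint64_t bias = 0x8000000000000000u; // the vpxor constant below
+;     return (int64_t)(a ^ bias) > (int64_t)(b ^ bias);
+;   }
+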
+define <4 x i64> @vec256_i64_unsigned_reg_reg(<4 x i64> %a1, <4 x i64> %a2) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i64_unsigned_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
+; AVX1-FALLBACK-NEXT:    vpxor %xmm3, %xmm2, %xmm4
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpxor %xmm3, %xmm2, %xmm5
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm6
+; AVX1-FALLBACK-NEXT:    vpxor %xmm3, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vpxor %xmm3, %xmm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm7, %xmm3, %xmm8
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm6, %ymm8, %ymm9
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm5, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm7, %xmm3
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX1-FALLBACK-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm3
+; AVX1-FALLBACK-NEXT:    vblendvpd %ymm9, %ymm0, %ymm1, %ymm1
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm4
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm4, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm9 = [1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm9, %xmm8, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpor %xmm9, %xmm6, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm1, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm6, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i64_unsigned_reg_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
+; AVX2-NEXT:    vpxor %ymm2, %ymm1, %ymm3
+; AVX2-NEXT:    vpxor %ymm2, %ymm0, %ymm2
+; AVX2-NEXT:    vpcmpgtq %ymm3, %ymm2, %ymm4
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm5 = [1,1,1,1]
+; AVX2-NEXT:    vpor %ymm5, %ymm4, %ymm5
+; AVX2-NEXT:    vpcmpgtq %ymm2, %ymm3, %ymm2
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm2
+; AVX2-NEXT:    vblendvpd %ymm4, %ymm0, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlq $32, %ymm5, %ymm2
+; AVX2-NEXT:    vpmuludq %ymm2, %ymm1, %ymm2
+; AVX2-NEXT:    vpsrlq $32, %ymm1, %ymm3
+; AVX2-NEXT:    vpmuludq %ymm5, %ymm3, %ymm3
+; AVX2-NEXT:    vpaddq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX2-NEXT:    vpmuludq %ymm5, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i64_unsigned_reg_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOP-FALLBACK-NEXT:    vpcomgtuq %xmm3, %xmm2, %xmm4
+; XOP-FALLBACK-NEXT:    vpcomgtuq %xmm1, %xmm0, %xmm5
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm6
+; XOP-FALLBACK-NEXT:    vpcomltuq %xmm3, %xmm2, %xmm3
+; XOP-FALLBACK-NEXT:    vpcomltuq %xmm1, %xmm0, %xmm7
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm3, %ymm7, %ymm3
+; XOP-FALLBACK-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm3
+; XOP-FALLBACK-NEXT:    vblendvpd %ymm6, %ymm0, %ymm1, %ymm1
+; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm6
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm6, %xmm3
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
+; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm5
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm1, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; XOP-FALLBACK-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
+; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i64_unsigned_reg_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT:    vpcomgtuq %xmm3, %xmm2, %xmm4
+; XOPAVX1-NEXT:    vpcomgtuq %xmm1, %xmm0, %xmm5
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm6
+; XOPAVX1-NEXT:    vpcomltuq %xmm3, %xmm2, %xmm3
+; XOPAVX1-NEXT:    vpcomltuq %xmm1, %xmm0, %xmm7
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm7, %ymm3
+; XOPAVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm1, %ymm3
+; XOPAVX1-NEXT:    vblendvpd %ymm6, %ymm0, %ymm1, %ymm1
+; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm6
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm6, %xmm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOPAVX1-NEXT:    vpor %xmm8, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpaddq %xmm6, %xmm7, %xmm6
+; XOPAVX1-NEXT:    vpsllq $32, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpor %xmm8, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm4, %xmm5
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm5
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm1, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm7, %xmm7
+; XOPAVX1-NEXT:    vpaddq %xmm7, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpsllq $32, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
+; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm6, %xmm0
+; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i64_unsigned_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vpcmpnleuq %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminuq %zmm1, %zmm0, %zmm2
+; AVX512F-NEXT:    vpmaxuq %zmm1, %zmm0, %zmm1
+; AVX512F-NEXT:    vpsubq %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlq $32, %ymm1, %ymm2
+; AVX512F-NEXT:    vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlq $32, %ymm3, %ymm4
+; AVX512F-NEXT:    vpmuludq %ymm4, %ymm1, %ymm4
+; AVX512F-NEXT:    vpaddq %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec256_i64_unsigned_reg_reg:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpcmpnleuq %ymm1, %ymm0, %k1
+; AVX512VL-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512VL-NEXT:    vmovdqa64 %ymm2, %ymm3 {%k1}
+; AVX512VL-NEXT:    vpminuq %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpmaxuq %ymm1, %ymm0, %ymm1
+; AVX512VL-NEXT:    vpsubq %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlq $32, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpmuludq %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpsrlq $32, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpmuludq %ymm3, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddq %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i64_unsigned_reg_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vpcmpnleuq %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminuq %zmm1, %zmm0, %zmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxuq %zmm1, %zmm0, %zmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubq %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %ymm1, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %ymm3, %ymm4
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm4, %ymm1, %ymm4
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm2, %ymm4, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+  %t3 = icmp ugt <4 x i64> %a1, %a2
+  %t4 = select <4 x i1> %t3, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> <i64 1, i64 1, i64 1, i64 1>
+  %t5 = select <4 x i1> %t3, <4 x i64> %a2, <4 x i64> %a1
+  %t6 = select <4 x i1> %t3, <4 x i64> %a1, <4 x i64> %a2
+  %t7 = sub <4 x i64> %t6, %t5
+  %t8 = lshr <4 x i64> %t7, <i64 1, i64 1, i64 1, i64 1>
+  %t9 = mul <4 x i64> %t8, %t4
+  %a10 = add <4 x i64> %t9, %a1
+  ret <4 x i64> %a10
+}
+
+; Values are loaded. Only check signed case.
+
+define <4 x i64> @vec256_i64_signed_mem_reg(<4 x i64>* %a1_addr, <4 x i64> %a2) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i64_signed_mem_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovapd (%rdi), %ymm3
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm4, %xmm2, %xmm5
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm6
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm8
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm8, %ymm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %ymm4, %ymm3, %ymm0, %ymm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %ymm7, %ymm3, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm6, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm0, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm6, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i64_signed_mem_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm2
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm3
+; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm4
+; AVX2-NEXT:    vblendvpd %ymm4, %ymm1, %ymm0, %ymm4
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsubq %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrlq $1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrlq $32, %ymm3, %ymm2
+; AVX2-NEXT:    vpmuludq %ymm2, %ymm0, %ymm2
+; AVX2-NEXT:    vpsrlq $32, %ymm0, %ymm4
+; AVX2-NEXT:    vpmuludq %ymm3, %ymm4, %ymm4
+; AVX2-NEXT:    vpaddq %ymm4, %ymm2, %ymm2
+; AVX2-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX2-NEXT:    vpmuludq %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddq %ymm1, %ymm2, %ymm1
+; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i64_signed_mem_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovapd (%rdi), %ymm3
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm4, %xmm2, %xmm5
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm0, %xmm1, %xmm6
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm4, %xmm2, %xmm4
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm0, %xmm1, %xmm8
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm8, %ymm4
+; XOP-FALLBACK-NEXT:    vblendvpd %ymm4, %ymm3, %ymm0, %ymm4
+; XOP-FALLBACK-NEXT:    vblendvpd %ymm7, %ymm3, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    vpsubq %xmm4, %xmm0, %xmm3
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm4, %xmm4
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpsubq %xmm4, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm6, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
+; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm0, %xmm6
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm0, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
+; XOP-FALLBACK-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm6, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
+; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i64_signed_mem_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovapd (%rdi), %ymm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOPAVX1-NEXT:    vpcomgtq %xmm4, %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpcomgtq %xmm0, %xmm1, %xmm6
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
+; XOPAVX1-NEXT:    vpcomltq %xmm4, %xmm2, %xmm4
+; XOPAVX1-NEXT:    vpcomltq %xmm0, %xmm1, %xmm8
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm8, %ymm4
+; XOPAVX1-NEXT:    vblendvpd %ymm4, %ymm3, %ymm0, %ymm4
+; XOPAVX1-NEXT:    vblendvpd %ymm7, %ymm3, %ymm0, %ymm0
+; XOPAVX1-NEXT:    vpsubq %xmm4, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm4, %xmm4
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT:    vpsubq %xmm4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOPAVX1-NEXT:    vpor %xmm8, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm6, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; XOPAVX1-NEXT:    vpmuludq %xmm6, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
+; XOPAVX1-NEXT:    vpsllq $32, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmuludq %xmm6, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpor %xmm8, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm6, %xmm0, %xmm6
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm0, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
+; XOPAVX1-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpsllq $32, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm6, %xmm2
+; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
+; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i64_signed_mem_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512F-NEXT:    vpcmpgtq %zmm0, %zmm1, %k1
+; AVX512F-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminsq %zmm0, %zmm1, %zmm2
+; AVX512F-NEXT:    vpmaxsq %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlq $1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlq $32, %ymm0, %ymm2
+; AVX512F-NEXT:    vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlq $32, %ymm3, %ymm4
+; AVX512F-NEXT:    vpmuludq %ymm4, %ymm0, %ymm4
+; AVX512F-NEXT:    vpaddq %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmuludq %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddq %ymm1, %ymm2, %ymm1
+; AVX512F-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec256_i64_signed_mem_reg:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512VL-NEXT:    vpcmpgtq %ymm0, %ymm1, %k1
+; AVX512VL-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512VL-NEXT:    vmovdqa64 %ymm2, %ymm3 {%k1}
+; AVX512VL-NEXT:    vpminsq %ymm0, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpmaxsq %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlq $1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsrlq $32, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpmuludq %ymm2, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpsrlq $32, %ymm0, %ymm4
+; AVX512VL-NEXT:    vpmuludq %ymm3, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddq %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmuludq %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpaddq %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i64_signed_mem_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtq %zmm0, %zmm1, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsq %zmm0, %zmm1, %zmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsq %zmm0, %zmm1, %zmm0
+; AVX512BW-FALLBACK-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $1, %ymm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %ymm0, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %ymm3, %ymm4
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm4, %ymm0, %ymm4
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm2, %ymm4, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm3, %ymm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm1, %ymm2, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+  %a1 = load <4 x i64>, <4 x i64>* %a1_addr
+  %t3 = icmp sgt <4 x i64> %a1, %a2 ; signed
+  %t4 = select <4 x i1> %t3, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> <i64 1, i64 1, i64 1, i64 1>
+  %t5 = select <4 x i1> %t3, <4 x i64> %a2, <4 x i64> %a1
+  %t6 = select <4 x i1> %t3, <4 x i64> %a1, <4 x i64> %a2
+  %t7 = sub <4 x i64> %t6, %t5
+  %t8 = lshr <4 x i64> %t7, <i64 1, i64 1, i64 1, i64 1>
+  %t9 = mul nsw <4 x i64> %t8, %t4 ; signed
+  %a10 = add nsw <4 x i64> %t9, %a1 ; signed
+  ret <4 x i64> %a10
+}
+
+define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, <4 x i64>* %a2_addr) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i64_signed_reg_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovapd (%rdi), %ymm2
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm4
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm0, %xmm6
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; AVX1-FALLBACK-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm3
+; AVX1-FALLBACK-NEXT:    vblendvpd %ymm7, %ymm0, %ymm2, %ymm2
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm2, %xmm4
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm4, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm6, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i64_signed_reg_mem:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm3
+; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm4
+; AVX2-NEXT:    vblendvpd %ymm4, %ymm0, %ymm1, %ymm4
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubq %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlq $32, %ymm3, %ymm2
+; AVX2-NEXT:    vpmuludq %ymm2, %ymm1, %ymm2
+; AVX2-NEXT:    vpsrlq $32, %ymm1, %ymm4
+; AVX2-NEXT:    vpmuludq %ymm3, %ymm4, %ymm4
+; AVX2-NEXT:    vpaddq %ymm4, %ymm2, %ymm2
+; AVX2-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX2-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i64_signed_reg_mem:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovapd (%rdi), %ymm2
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm3
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm4
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm4, %xmm1, %xmm5
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm3, %xmm0, %xmm6
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm4, %xmm1, %xmm4
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm3, %xmm0, %xmm3
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; XOP-FALLBACK-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm3
+; XOP-FALLBACK-NEXT:    vblendvpd %ymm7, %ymm0, %ymm2, %ymm2
+; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm2, %xmm4
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm4, %xmm3
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm6, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
+; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm2, %xmm6
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
+; XOP-FALLBACK-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
+; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
+; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
+; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i64_signed_reg_mem:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovapd (%rdi), %ymm2
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm3
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm4
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; XOPAVX1-NEXT:    vpcomgtq %xmm4, %xmm1, %xmm5
+; XOPAVX1-NEXT:    vpcomgtq %xmm3, %xmm0, %xmm6
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm6, %ymm7
+; XOPAVX1-NEXT:    vpcomltq %xmm4, %xmm1, %xmm4
+; XOPAVX1-NEXT:    vpcomltq %xmm3, %xmm0, %xmm3
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm3, %ymm3
+; XOPAVX1-NEXT:    vblendvpd %ymm3, %ymm0, %ymm2, %ymm3
+; XOPAVX1-NEXT:    vblendvpd %ymm7, %ymm0, %ymm2, %ymm2
+; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm2, %xmm4
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
+; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm4, %xmm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOPAVX1-NEXT:    vpor %xmm8, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm6, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; XOPAVX1-NEXT:    vpmuludq %xmm6, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
+; XOPAVX1-NEXT:    vpsllq $32, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmuludq %xmm6, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpor %xmm8, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm6, %xmm2, %xmm6
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
+; XOPAVX1-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpsllq $32, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
+; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
+; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i64_signed_reg_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512F-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; AVX512F-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; AVX512F-NEXT:    vpsubq %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlq $32, %ymm1, %ymm2
+; AVX512F-NEXT:    vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlq $32, %ymm3, %ymm4
+; AVX512F-NEXT:    vpmuludq %ymm4, %ymm1, %ymm4
+; AVX512F-NEXT:    vpaddq %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec256_i64_signed_reg_mem:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512VL-NEXT:    vpcmpgtq %ymm1, %ymm0, %k1
+; AVX512VL-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512VL-NEXT:    vmovdqa64 %ymm2, %ymm3 {%k1}
+; AVX512VL-NEXT:    vpminsq %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpmaxsq %ymm1, %ymm0, %ymm1
+; AVX512VL-NEXT:    vpsubq %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlq $32, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpmuludq %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpsrlq $32, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpmuludq %ymm3, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddq %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i64_signed_reg_mem:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubq %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %ymm1, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %ymm3, %ymm4
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm4, %ymm1, %ymm4
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm2, %ymm4, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+  %a2 = load <4 x i64>, <4 x i64>* %a2_addr
+  %t3 = icmp sgt <4 x i64> %a1, %a2 ; signed
+  %t4 = select <4 x i1> %t3, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> <i64 1, i64 1, i64 1, i64 1>
+  %t5 = select <4 x i1> %t3, <4 x i64> %a2, <4 x i64> %a1
+  %t6 = select <4 x i1> %t3, <4 x i64> %a1, <4 x i64> %a2
+  %t7 = sub <4 x i64> %t6, %t5
+  %t8 = lshr <4 x i64> %t7, <i64 1, i64 1, i64 1, i64 1>
+  %t9 = mul nsw <4 x i64> %t8, %t4 ; signed
+  %a10 = add nsw <4 x i64> %t9, %a1 ; signed
+  ret <4 x i64> %a10
+}
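+
+; Every function in this file instantiates the same scalar recipe as the IR
+; above, just at a different element type; a rough C model of it (an
+; illustrative sketch with a hypothetical helper name, not something
+; FileCheck verifies) is:
+;
+;   int64_t midpoint_like(int64_t a1, int64_t a2) {   // assumes <stdint.h>
+;     int64_t t4 = a1 > a2 ? -1 : 1;                  // select of -1/+1
+;     int64_t lo = a1 > a2 ? a2 : a1;                 // the two selects are
+;     int64_t hi = a1 > a2 ? a1 : a2;                 // just min and max
+;     uint64_t t8 = ((uint64_t)hi - (uint64_t)lo) >> 1;  // lshr, not ashr
+;     return a1 + (int64_t)t8 * t4;
+;   }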
+
+define <4 x i64> @vec256_i64_signed_mem_mem(<4 x i64>* %a1_addr, <4 x i64>* %a2_addr) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i64_signed_mem_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovapd (%rdi), %ymm2
+; AVX1-FALLBACK-NEXT:    vmovapd (%rsi), %ymm3
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm4
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm5
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm5, %xmm1, %xmm6
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm4, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm8
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %ymm4, %ymm2, %ymm3, %ymm4
+; AVX1-FALLBACK-NEXT:    vblendvpd %ymm8, %ymm2, %ymm3, %ymm2
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm2, %xmm3
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsubq %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm7, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpor %xmm8, %xmm6, %xmm5
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
+; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i64_signed_mem_mem:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX2-NEXT:    vpor %ymm3, %ymm2, %ymm3
+; AVX2-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm4
+; AVX2-NEXT:    vblendvpd %ymm4, %ymm0, %ymm1, %ymm4
+; AVX2-NEXT:    vblendvpd %ymm2, %ymm0, %ymm1, %ymm1
+; AVX2-NEXT:    vpsubq %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlq $32, %ymm3, %ymm2
+; AVX2-NEXT:    vpmuludq %ymm2, %ymm1, %ymm2
+; AVX2-NEXT:    vpsrlq $32, %ymm1, %ymm4
+; AVX2-NEXT:    vpmuludq %ymm3, %ymm4, %ymm4
+; AVX2-NEXT:    vpaddq %ymm4, %ymm2, %ymm2
+; AVX2-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX2-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i64_signed_mem_mem:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovapd (%rdi), %ymm2
+; XOP-FALLBACK-NEXT:    vmovapd (%rsi), %ymm3
+; XOP-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm4
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm5
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm1
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm5, %xmm1, %xmm6
+; XOP-FALLBACK-NEXT:    vpcomgtq %xmm4, %xmm0, %xmm7
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm8
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm5, %xmm1, %xmm5
+; XOP-FALLBACK-NEXT:    vpcomltq %xmm4, %xmm0, %xmm4
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
+; XOP-FALLBACK-NEXT:    vblendvpd %ymm4, %ymm2, %ymm3, %ymm4
+; XOP-FALLBACK-NEXT:    vblendvpd %ymm8, %ymm2, %ymm3, %ymm2
+; XOP-FALLBACK-NEXT:    vpsubq %xmm4, %xmm2, %xmm3
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm4, %xmm4
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubq %xmm4, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm7, %xmm5
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
+; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; XOP-FALLBACK-NEXT:    vpor %xmm8, %xmm6, %xmm5
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm6, %xmm2, %xmm6
+; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
+; XOP-FALLBACK-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
+; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
+; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
+; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i64_signed_mem_mem:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovapd (%rdi), %ymm2
+; XOPAVX1-NEXT:    vmovapd (%rsi), %ymm3
+; XOPAVX1-NEXT:    vmovdqa (%rsi), %xmm4
+; XOPAVX1-NEXT:    vmovdqa 16(%rsi), %xmm5
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm0
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
+; XOPAVX1-NEXT:    vpcomgtq %xmm5, %xmm1, %xmm6
+; XOPAVX1-NEXT:    vpcomgtq %xmm4, %xmm0, %xmm7
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm8
+; XOPAVX1-NEXT:    vpcomltq %xmm5, %xmm1, %xmm5
+; XOPAVX1-NEXT:    vpcomltq %xmm4, %xmm0, %xmm4
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm4
+; XOPAVX1-NEXT:    vblendvpd %ymm4, %ymm2, %ymm3, %ymm4
+; XOPAVX1-NEXT:    vblendvpd %ymm8, %ymm2, %ymm3, %ymm2
+; XOPAVX1-NEXT:    vpsubq %xmm4, %xmm2, %xmm3
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm4, %xmm4
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
+; XOPAVX1-NEXT:    vpsubq %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsrlq $1, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOPAVX1-NEXT:    vpor %xmm8, %xmm7, %xmm5
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm3, %xmm7
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpaddq %xmm4, %xmm7, %xmm4
+; XOPAVX1-NEXT:    vpsllq $32, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; XOPAVX1-NEXT:    vpor %xmm8, %xmm6, %xmm5
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm6, %xmm2, %xmm6
+; XOPAVX1-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm7
+; XOPAVX1-NEXT:    vpaddq %xmm7, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpsllq $32, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm6, %xmm1
+; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
+; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm3, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i64_signed_mem_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512F-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512F-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512F-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512F-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; AVX512F-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; AVX512F-NEXT:    vpsubq %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlq $32, %ymm1, %ymm2
+; AVX512F-NEXT:    vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlq $32, %ymm3, %ymm4
+; AVX512F-NEXT:    vpmuludq %ymm4, %ymm1, %ymm4
+; AVX512F-NEXT:    vpaddq %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: vec256_i64_signed_mem_mem:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512VL-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512VL-NEXT:    vpcmpgtq %ymm1, %ymm0, %k1
+; AVX512VL-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512VL-NEXT:    vmovdqa64 %ymm2, %ymm3 {%k1}
+; AVX512VL-NEXT:    vpminsq %ymm1, %ymm0, %ymm2
+; AVX512VL-NEXT:    vpmaxsq %ymm1, %ymm0, %ymm1
+; AVX512VL-NEXT:    vpsubq %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlq $32, %ymm3, %ymm2
+; AVX512VL-NEXT:    vpmuludq %ymm2, %ymm1, %ymm2
+; AVX512VL-NEXT:    vpsrlq $32, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpmuludq %ymm3, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpaddq %ymm4, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512VL-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i64_signed_mem_mem:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpbroadcastq {{.*#+}} ymm3 = [1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; AVX512BW-FALLBACK-NEXT:    vpsubq %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $1, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %ymm1, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm3, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpsrlq $32, %ymm3, %ymm4
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm4, %ymm1, %ymm4
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm2, %ymm4, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmuludq %ymm3, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+  %a1 = load <4 x i64>, <4 x i64>* %a1_addr
+  %a2 = load <4 x i64>, <4 x i64>* %a2_addr
+  %t3 = icmp sgt <4 x i64> %a1, %a2 ; signed
+  %t4 = select <4 x i1> %t3, <4 x i64> <i64 -1, i64 -1, i64 -1, i64 -1>, <4 x i64> <i64 1, i64 1, i64 1, i64 1>
+  %t5 = select <4 x i1> %t3, <4 x i64> %a2, <4 x i64> %a1
+  %t6 = select <4 x i1> %t3, <4 x i64> %a1, <4 x i64> %a2
+  %t7 = sub <4 x i64> %t6, %t5
+  %t8 = lshr <4 x i64> %t7, <i64 1, i64 1, i64 1, i64 1>
+  %t9 = mul nsw <4 x i64> %t8, %t4 ; signed
+  %a10 = add nsw <4 x i64> %t9, %a1 ; signed
+  ret <4 x i64> %a10
+}
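+
+; A few informal notes on the i64 lowerings above (commentary, not CHECK'd):
+; * AVX1 has no 256-bit integer ALU ops, so the ymm values are split into
+;   xmm halves (vextractf128) and recombined at the end (vinsertf128).
+; * There is no packed 64-bit multiply below AVX512DQ, so multiplying by the
+;   +1/-1 vector is emulated from 32-bit halves with vpmuludq partial
+;   products; the scalar equivalent is roughly:
+;
+;     uint64_t mullo64(uint64_t a, uint64_t b) {   // low 64 bits of a*b
+;       uint64_t al = a & 0xffffffff, ah = a >> 32;
+;       uint64_t bl = b & 0xffffffff, bh = b >> 32;
+;       return al * bl + ((al * bh + ah * bl) << 32); // ah*bh drops out mod 2^64
+;     }
+;
+; * The AVX512 paths compare straight into a mask register (vpcmpgtq ... %k1)
+;   and build the +1/-1 vector by merge-masking all-ones over a splat of 1,
+;   then use vpminsq/vpmaxsq instead of the blendv selects. On targets
+;   without AVX512VL the ymm operands are first widened to zmm, which is
+;   what the "# kill: def $ymm0 killed $ymm0 def $zmm0" comments record.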
+
+; ---------------------------------------------------------------------------- ;
+; 16-bit width. 256 / 16 = 16 elts.
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define <16 x i16> @vec256_i16_signed_reg_reg(<16 x i16> %a1, <16 x i16> %a2) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i16_signed_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm5
+; AVX1-FALLBACK-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
+; AVX1-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm7, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i16_signed_reg_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm3
+; AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsubw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i16_signed_reg_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtw %xmm2, %xmm3, %xmm4
+; XOP-FALLBACK-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm5
+; XOP-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
+; XOP-FALLBACK-NEXT:    vpminsw %xmm2, %xmm3, %xmm7
+; XOP-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubw %xmm7, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i16_signed_reg_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vpcomgtw %xmm2, %xmm3, %xmm4
+; XOPAVX1-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm5
+; XOPAVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
+; XOPAVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm7
+; XOPAVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vpsubw %xmm7, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOPAVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i16_signed_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpminsw %ymm1, %ymm0, %ymm3
+; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpsubw %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec256_i16_signed_reg_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm1, %ymm0, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i16_signed_reg_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtw %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsw %ymm1, %ymm0, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsubw %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec256_i16_signed_reg_reg:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpcmpgtw %ymm1, %ymm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu16 %ymm2, %ymm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsw %ymm1, %ymm0, %ymm2
+; AVX512VLBW-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512VLBW-NEXT:    vpsubw %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+  %t3 = icmp sgt <16 x i16> %a1, %a2 ; signed
+  %t4 = select <16 x i1> %t3, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <16 x i1> %t3, <16 x i16> %a2, <16 x i16> %a1
+  %t6 = select <16 x i1> %t3, <16 x i16> %a1, <16 x i16> %a2
+  %t7 = sub <16 x i16> %t6, %t5
+  %t16 = lshr <16 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul nsw <16 x i16> %t16, %t4 ; signed
+  %a10 = add nsw <16 x i16> %t9, %a1 ; signed
+  ret <16 x i16> %a10
+}
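+
+; For i16 the ISA is a much better fit (informal notes): vpminsw/vpmaxsw
+; implement the two selects directly, and because a vector compare produces
+; 0x0000 or 0xFFFF per lane, OR-ing in 1 turns the mask into exactly the
+; +1/-1 vector:
+;
+;   uint16_t step = mask | 1;  // 0x0000|1 == +1, 0xFFFF|1 == 0xFFFF == -1
+;
+; (the "vpor {{.*}}(%rip)" lines fold that splat-of-1 constant from the
+; constant pool). XOP targets additionally fuse the trailing multiply and
+; add into a single vpmacsww.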
+
+define <16 x i16> @vec256_i16_unsigned_reg_reg(<16 x i16> %a1, <16 x i16> %a2) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i16_unsigned_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpminuw %xmm2, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm5
+; AVX1-FALLBACK-NEXT:    vpcmpeqd %xmm8, %xmm8, %xmm8
+; AVX1-FALLBACK-NEXT:    vpxor %xmm8, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpminuw %xmm1, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vpcmpeqw %xmm7, %xmm0, %xmm6
+; AVX1-FALLBACK-NEXT:    vpxor %xmm8, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmaxuw %xmm2, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm7, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm4, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm6, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i16_unsigned_reg_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpminuw %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpcmpeqw %ymm2, %ymm0, %ymm3
+; AVX2-NEXT:    vpcmpeqd %ymm4, %ymm4, %ymm4
+; AVX2-NEXT:    vpxor %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vpor {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsubw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i16_unsigned_reg_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtuw %xmm2, %xmm3, %xmm4
+; XOP-FALLBACK-NEXT:    vpcomgtuw %xmm1, %xmm0, %xmm5
+; XOP-FALLBACK-NEXT:    vpminuw %xmm1, %xmm0, %xmm6
+; XOP-FALLBACK-NEXT:    vpminuw %xmm2, %xmm3, %xmm7
+; XOP-FALLBACK-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmaxuw %xmm2, %xmm3, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubw %xmm7, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i16_unsigned_reg_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vpcomgtuw %xmm2, %xmm3, %xmm4
+; XOPAVX1-NEXT:    vpcomgtuw %xmm1, %xmm0, %xmm5
+; XOPAVX1-NEXT:    vpminuw %xmm1, %xmm0, %xmm6
+; XOPAVX1-NEXT:    vpminuw %xmm2, %xmm3, %xmm7
+; XOPAVX1-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmaxuw %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vpsubw %xmm7, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOPAVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i16_unsigned_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpminuw %ymm1, %ymm0, %ymm2
+; AVX512F-NEXT:    vpcmpeqw %ymm2, %ymm0, %ymm3
+; AVX512F-NEXT:    vpternlogq $15, %zmm3, %zmm3, %zmm3
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpsubw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec256_i16_unsigned_reg_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vpminuw %ymm1, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpcmpeqw %ymm2, %ymm0, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpternlogq $15, %ymm3, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i16_unsigned_reg_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vpcmpnleuw %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminuw %ymm1, %ymm0, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsubw %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec256_i16_unsigned_reg_reg:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpcmpnleuw %ymm1, %ymm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu16 %ymm2, %ymm3 {%k1}
+; AVX512VLBW-NEXT:    vpminuw %ymm1, %ymm0, %ymm2
+; AVX512VLBW-NEXT:    vpmaxuw %ymm1, %ymm0, %ymm1
+; AVX512VLBW-NEXT:    vpsubw %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+  %t3 = icmp ugt <16 x i16> %a1, %a2
+  %t4 = select <16 x i1> %t3, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <16 x i1> %t3, <16 x i16> %a2, <16 x i16> %a1
+  %t6 = select <16 x i1> %t3, <16 x i16> %a1, <16 x i16> %a2
+  %t7 = sub <16 x i16> %t6, %t5
+  %t16 = lshr <16 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul <16 x i16> %t16, %t4
+  %a10 = add <16 x i16> %t9, %a1
+  ret <16 x i16> %a10
+}
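+
+; The unsigned i16 case is trickier before AVX512 (informal notes): x86 has
+; no unsigned word compare, so "a >u b" is synthesized from the identity
+;
+;   a >u b  <=>  a != umin(a, b)
+;
+; i.e. vpminuw + vpcmpeqw followed by a NOT (vpxor with all-ones, or
+; vpternlogq $15 on AVX512, whose immediate 0x0F is the truth table of ~a).
+; AVX512BW can instead compare unsigned directly into a mask (vpcmpnleuw).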
+
+; Values are loaded. Only check signed case.
+
+define <16 x i16> @vec256_i16_signed_mem_reg(<16 x i16>* %a1_addr, <16 x i16> %a2) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i16_signed_mem_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm0, %xmm2, %xmm5
+; AVX1-FALLBACK-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
+; AVX1-FALLBACK-NEXT:    vpminsw %xmm0, %xmm2, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm7, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i16_signed_mem_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX2-NEXT:    vpcmpgtw %ymm0, %ymm1, %ymm2
+; AVX2-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpminsw %ymm0, %ymm1, %ymm3
+; AVX2-NEXT:    vpmaxsw %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpsubw %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i16_signed_mem_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtw %xmm1, %xmm3, %xmm4
+; XOP-FALLBACK-NEXT:    vpcomgtw %xmm0, %xmm2, %xmm5
+; XOP-FALLBACK-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
+; XOP-FALLBACK-NEXT:    vpminsw %xmm1, %xmm3, %xmm7
+; XOP-FALLBACK-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
+; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubw %xmm7, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmacsww %xmm2, %xmm5, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i16_signed_mem_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm2
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOPAVX1-NEXT:    vpcomgtw %xmm1, %xmm3, %xmm4
+; XOPAVX1-NEXT:    vpcomgtw %xmm0, %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
+; XOPAVX1-NEXT:    vpminsw %xmm1, %xmm3, %xmm7
+; XOPAVX1-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
+; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT:    vpsubw %xmm7, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOPAVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmacsww %xmm2, %xmm5, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i16_signed_mem_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512F-NEXT:    vpcmpgtw %ymm0, %ymm1, %ymm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpminsw %ymm0, %ymm1, %ymm3
+; AVX512F-NEXT:    vpmaxsw %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vpsubw %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec256_i16_signed_mem_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm0, %ymm1, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm0, %ymm1, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm3, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i16_signed_mem_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtw %zmm0, %zmm1, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsw %ymm0, %ymm1, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsw %ymm0, %ymm1, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpsubw %ymm2, %ymm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpmullw %ymm3, %ymm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec256_i16_signed_mem_reg:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512VLBW-NEXT:    vpcmpgtw %ymm0, %ymm1, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu16 %ymm2, %ymm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsw %ymm0, %ymm1, %ymm2
+; AVX512VLBW-NEXT:    vpmaxsw %ymm0, %ymm1, %ymm0
+; AVX512VLBW-NEXT:    vpsubw %ymm2, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpmullw %ymm3, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+  %a1 = load <16 x i16>, <16 x i16>* %a1_addr
+  %t3 = icmp sgt <16 x i16> %a1, %a2 ; signed
+  %t4 = select <16 x i1> %t3, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <16 x i1> %t3, <16 x i16> %a2, <16 x i16> %a1
+  %t6 = select <16 x i1> %t3, <16 x i16> %a1, <16 x i16> %a2
+  %t7 = sub <16 x i16> %t6, %t5
+  %t16 = lshr <16 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul nsw <16 x i16> %t16, %t4 ; signed
+  %a10 = add nsw <16 x i16> %t9, %a1 ; signed
+  ret <16 x i16> %a10
+}
+
+define <16 x i16> @vec256_i16_signed_reg_mem(<16 x i16> %a1, <16 x i16>* %a2_addr) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i16_signed_reg_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm5
+; AVX1-FALLBACK-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
+; AVX1-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm7, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i16_signed_reg_mem:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX2-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm3
+; AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsubw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i16_signed_reg_mem:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtw %xmm2, %xmm3, %xmm4
+; XOP-FALLBACK-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm5
+; XOP-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
+; XOP-FALLBACK-NEXT:    vpminsw %xmm2, %xmm3, %xmm7
+; XOP-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubw %xmm7, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i16_signed_reg_mem:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vpcomgtw %xmm2, %xmm3, %xmm4
+; XOPAVX1-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm5
+; XOPAVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
+; XOPAVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm7
+; XOPAVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vpsubw %xmm7, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOPAVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i16_signed_reg_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512F-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpminsw %ymm1, %ymm0, %ymm3
+; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpsubw %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec256_i16_signed_reg_mem:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm1, %ymm0, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i16_signed_reg_mem:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtw %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsw %ymm1, %ymm0, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsubw %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec256_i16_signed_reg_mem:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512VLBW-NEXT:    vpcmpgtw %ymm1, %ymm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu16 %ymm2, %ymm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsw %ymm1, %ymm0, %ymm2
+; AVX512VLBW-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512VLBW-NEXT:    vpsubw %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+  %a2 = load <16 x i16>, <16 x i16>* %a2_addr
+  %t3 = icmp sgt <16 x i16> %a1, %a2 ; signed
+  %t4 = select <16 x i1> %t3, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <16 x i1> %t3, <16 x i16> %a2, <16 x i16> %a1
+  %t6 = select <16 x i1> %t3, <16 x i16> %a1, <16 x i16> %a2
+  %t7 = sub <16 x i16> %t6, %t5
+  %t16 = lshr <16 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul nsw <16 x i16> %t16, %t4 ; signed
+  %a10 = add nsw <16 x i16> %t9, %a1 ; signed
+  ret <16 x i16> %a10
+}
+
+define <16 x i16> @vec256_i16_signed_mem_mem(<16 x i16>* %a1_addr, <16 x i16>* %a2_addr) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i16_signed_mem_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm1
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm3, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm0, %xmm2, %xmm5
+; AVX1-FALLBACK-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
+; AVX1-FALLBACK-NEXT:    vpminsw %xmm0, %xmm2, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsubw %xmm7, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i16_signed_mem_mem:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX2-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpminsw %ymm1, %ymm0, %ymm3
+; AVX2-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsubw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i16_signed_mem_mem:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm0
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtw %xmm1, %xmm3, %xmm4
+; XOP-FALLBACK-NEXT:    vpcomgtw %xmm0, %xmm2, %xmm5
+; XOP-FALLBACK-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
+; XOP-FALLBACK-NEXT:    vpminsw %xmm1, %xmm3, %xmm7
+; XOP-FALLBACK-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
+; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubw %xmm7, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmacsww %xmm2, %xmm5, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i16_signed_mem_mem:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovdqa (%rsi), %xmm0
+; XOPAVX1-NEXT:    vmovdqa 16(%rsi), %xmm1
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm2
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOPAVX1-NEXT:    vpcomgtw %xmm1, %xmm3, %xmm4
+; XOPAVX1-NEXT:    vpcomgtw %xmm0, %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
+; XOPAVX1-NEXT:    vpminsw %xmm1, %xmm3, %xmm7
+; XOPAVX1-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
+; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT:    vpsubw %xmm7, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOPAVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmacsww %xmm2, %xmm5, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i16_signed_mem_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512F-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpminsw %ymm1, %ymm0, %ymm3
+; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpsubw %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec256_i16_signed_mem_mem:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm1, %ymm0, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i16_signed_mem_mem:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtw %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsw %ymm1, %ymm0, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsubw %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec256_i16_signed_mem_mem:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512VLBW-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512VLBW-NEXT:    vpcmpgtw %ymm1, %ymm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu16 %ymm2, %ymm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsw %ymm1, %ymm0, %ymm2
+; AVX512VLBW-NEXT:    vpmaxsw %ymm1, %ymm0, %ymm1
+; AVX512VLBW-NEXT:    vpsubw %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+  %a1 = load <16 x i16>, <16 x i16>* %a1_addr
+  %a2 = load <16 x i16>, <16 x i16>* %a2_addr
+  %t3 = icmp sgt <16 x i16> %a1, %a2 ; signed
+  %t4 = select <16 x i1> %t3, <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <16 x i1> %t3, <16 x i16> %a2, <16 x i16> %a1
+  %t6 = select <16 x i1> %t3, <16 x i16> %a1, <16 x i16> %a2
+  %t7 = sub <16 x i16> %t6, %t5
+  %t16 = lshr <16 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul nsw <16 x i16> %t16, %t4 ; signed
+  %a10 = add nsw <16 x i16> %t9, %a1 ; signed
+  ret <16 x i16> %a10
+}
+
+; ---------------------------------------------------------------------------- ;
+; 8-bit width. 256 / 8 = 32 elts.
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
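+; The i8 functions below have the least ISA support to work with (informal
+; notes): there is no per-byte shift, so the "lshr by 1" becomes a word
+; shift vpsrlw $1 followed by masking each byte with 0x7f to clear the bit
+; shifted in from the neighboring byte, and there is no byte multiply at
+; all, so the multiply is done by widening bytes to words (vpunpckhbw /
+; vpmovzxbw), multiplying with vpmullw, masking the results back to their
+; low bytes, and re-packing with vpackuswb. The per-byte semantics being
+; emulated are simply (hypothetical helpers, shown only for illustration):
+;
+;   uint8_t lshr1(uint8_t x) { return x >> 1; }             // vpsrlw + 0x7f mask
+;   uint8_t mul8(uint8_t a, uint8_t b) { return (uint8_t)(a * b); } // widen/pmullw/pack
+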
+define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i8_signed_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm3, %xmm2, %xmm8
+; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm5
+; AVX1-FALLBACK-NEXT:    vpminsb %xmm3, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm3, %xmm2, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm6, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm7, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255]
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm8, %xmm5
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i8_signed_reg_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm3
+; AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX2-NEXT:    vpmullw %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i8_signed_reg_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm8
+; XOP-FALLBACK-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm5
+; XOP-FALLBACK-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
+; XOP-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm7
+; XOP-FALLBACK-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubb %xmm7, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm4, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm8, %xmm6
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm4, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i8_signed_reg_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm8
+; XOPAVX1-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm5
+; XOPAVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
+; XOPAVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm7
+; XOPAVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubb %xmm7, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOPAVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOPAVX1-NEXT:    vpperm %xmm5, %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOPAVX1-NEXT:    vpor %xmm7, %xmm8, %xmm6
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpperm %xmm5, %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i8_signed_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpminsb %ymm1, %ymm0, %ymm3
+; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec256_i8_signed_reg_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm1, %ymm0, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm4, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i8_signed_reg_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtb %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsb %ymm1, %ymm0, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsubb %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512BW-FALLBACK-NEXT:    vpmullw %zmm2, %zmm1, %zmm1
+; AVX512BW-FALLBACK-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec256_i8_signed_reg_reg:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpcmpgtb %ymm1, %ymm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu8 %ymm2, %ymm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsb %ymm1, %ymm0, %ymm2
+; AVX512VLBW-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512VLBW-NEXT:    vpsubb %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512VLBW-NEXT:    vpmullw %zmm2, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VLBW-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+  %t3 = icmp sgt <32 x i8> %a1, %a2 ; signed
+  %t4 = select <32 x i1> %t3, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <32 x i1> %t3, <32 x i8> %a2, <32 x i8> %a1
+  %t6 = select <32 x i1> %t3, <32 x i8> %a1, <32 x i8> %a2
+  %t7 = sub <32 x i8> %t6, %t5
+  %t8 = lshr <32 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul nsw <32 x i8> %t8, %t4 ; signed
+  %a10 = add nsw <32 x i8> %t9, %a1 ; signed
+  ret <32 x i8> %a10
+}
+
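+; Pre-AVX512BW x86 also lacks an unsigned byte greater-than compare, so in
+; the unsigned variant below the icmp ugt is expected to lower either via
+; XOP's vpcomgtub, via AVX512BW's vpcmpnleub mask compare, or as
+; "a1 != umin(a1, a2)" built from vpminub + vpcmpeqb + a NOT (vpxor with
+; all-ones, or vpternlogq $15 on AVX512F).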
+define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i8_unsigned_reg_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpminub %xmm3, %xmm2, %xmm4
+; AVX1-FALLBACK-NEXT:    vpcmpeqb %xmm4, %xmm2, %xmm5
+; AVX1-FALLBACK-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; AVX1-FALLBACK-NEXT:    vpxor %xmm6, %xmm5, %xmm8
+; AVX1-FALLBACK-NEXT:    vpminub %xmm1, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vpcmpeqb %xmm7, %xmm0, %xmm5
+; AVX1-FALLBACK-NEXT:    vpxor %xmm6, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpmaxub %xmm3, %xmm2, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm7, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
+; AVX1-FALLBACK-NEXT:    vpand %xmm7, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpand %xmm7, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm4, %xmm1, %xmm1
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm8, %xmm5
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpand %xmm7, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpand %xmm7, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i8_unsigned_reg_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpminub %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpcmpeqb %ymm2, %ymm0, %ymm3
+; AVX2-NEXT:    vpcmpeqd %ymm4, %ymm4, %ymm4
+; AVX2-NEXT:    vpxor %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vpor {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT:    vpmaxub %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsubb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
+; AVX2-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
+; AVX2-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpackuswb %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i8_unsigned_reg_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtub %xmm2, %xmm3, %xmm8
+; XOP-FALLBACK-NEXT:    vpcomgtub %xmm1, %xmm0, %xmm5
+; XOP-FALLBACK-NEXT:    vpminub %xmm2, %xmm3, %xmm6
+; XOP-FALLBACK-NEXT:    vpminub %xmm1, %xmm0, %xmm7
+; XOP-FALLBACK-NEXT:    vpmaxub %xmm2, %xmm3, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubb %xmm7, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm4, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm8, %xmm6
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm4, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i8_unsigned_reg_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vpcomgtub %xmm2, %xmm3, %xmm8
+; XOPAVX1-NEXT:    vpcomgtub %xmm1, %xmm0, %xmm5
+; XOPAVX1-NEXT:    vpminub %xmm2, %xmm3, %xmm6
+; XOPAVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm7
+; XOPAVX1-NEXT:    vpmaxub %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubb %xmm7, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOPAVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOPAVX1-NEXT:    vpperm %xmm5, %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOPAVX1-NEXT:    vpor %xmm7, %xmm8, %xmm6
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpperm %xmm5, %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i8_unsigned_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpminub %ymm1, %ymm0, %ymm2
+; AVX512F-NEXT:    vpcmpeqb %ymm2, %ymm0, %ymm3
+; AVX512F-NEXT:    vpternlogq $15, %zmm3, %zmm3, %zmm3
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT:    vpmaxub %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpsubb %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpackuswb %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec256_i8_unsigned_reg_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vpminub %ymm1, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpcmpeqb %ymm2, %ymm0, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpternlogq $15, %ymm3, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxub %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i8_unsigned_reg_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vpcmpnleub %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminub %ymm1, %ymm0, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxub %ymm1, %ymm0, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsubb %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512BW-FALLBACK-NEXT:    vpmullw %zmm2, %zmm1, %zmm1
+; AVX512BW-FALLBACK-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec256_i8_unsigned_reg_reg:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vpcmpnleub %ymm1, %ymm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu8 %ymm2, %ymm3 {%k1}
+; AVX512VLBW-NEXT:    vpminub %ymm1, %ymm0, %ymm2
+; AVX512VLBW-NEXT:    vpmaxub %ymm1, %ymm0, %ymm1
+; AVX512VLBW-NEXT:    vpsubb %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512VLBW-NEXT:    vpmullw %zmm2, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VLBW-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+  %t3 = icmp ugt <32 x i8> %a1, %a2
+  %t4 = select <32 x i1> %t3, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <32 x i1> %t3, <32 x i8> %a2, <32 x i8> %a1
+  %t6 = select <32 x i1> %t3, <32 x i8> %a1, <32 x i8> %a2
+  %t7 = sub <32 x i8> %t6, %t5
+  %t8 = lshr <32 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul <32 x i8> %t8, %t4
+  %a10 = add <32 x i8> %t9, %a1
+  ret <32 x i8> %a10
+}
+
+; Values are loaded. Only check signed case.
+
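+; These mem variants feed the same pattern through vmovdqa loads of one or
+; both operands, checking that the lowering is unchanged when the values
+; start out in memory.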
+define <32 x i8> @vec256_i8_signed_mem_reg(<32 x i8>* %a1_addr, <32 x i8> %a2) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i8_signed_mem_reg:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm3, %xmm2, %xmm8
+; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm5
+; AVX1-FALLBACK-NEXT:    vpminsb %xmm3, %xmm2, %xmm6
+; AVX1-FALLBACK-NEXT:    vpminsb %xmm0, %xmm1, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm3, %xmm2, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm6, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm7, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255]
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm4, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm8, %xmm5
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i8_signed_mem_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX2-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm2
+; AVX2-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpminsb %ymm0, %ymm1, %ymm3
+; AVX2-NEXT:    vpmaxsb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpsubb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX2-NEXT:    vpmullw %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX2-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    vpackuswb %ymm3, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i8_signed_mem_reg:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm8
+; XOP-FALLBACK-NEXT:    vpcomgtb %xmm0, %xmm1, %xmm5
+; XOP-FALLBACK-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
+; XOP-FALLBACK-NEXT:    vpminsb %xmm0, %xmm1, %xmm7
+; XOP-FALLBACK-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vpsubb %xmm7, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm4, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm8, %xmm6
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm4, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i8_signed_mem_reg:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOPAVX1-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm8
+; XOPAVX1-NEXT:    vpcomgtb %xmm0, %xmm1, %xmm5
+; XOPAVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
+; XOPAVX1-NEXT:    vpminsb %xmm0, %xmm1, %xmm7
+; XOPAVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vpsubb %xmm7, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOPAVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOPAVX1-NEXT:    vpperm %xmm5, %xmm4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOPAVX1-NEXT:    vpor %xmm7, %xmm8, %xmm6
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpperm %xmm5, %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i8_signed_mem_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512F-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpminsb %ymm0, %ymm1, %ymm3
+; AVX512F-NEXT:    vpmaxsb %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vpsubb %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpackuswb %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec256_i8_signed_mem_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm0, %ymm1, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm3, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm4, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm4, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm3, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i8_signed_mem_reg:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtb %zmm0, %zmm1, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsb %ymm0, %ymm1, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsb %ymm0, %ymm1, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpsubb %ymm2, %ymm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512BW-FALLBACK-NEXT:    vpmullw %zmm2, %zmm0, %zmm0
+; AVX512BW-FALLBACK-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec256_i8_signed_mem_reg:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512VLBW-NEXT:    vpcmpgtb %ymm0, %ymm1, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu8 %ymm2, %ymm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsb %ymm0, %ymm1, %ymm2
+; AVX512VLBW-NEXT:    vpmaxsb %ymm0, %ymm1, %ymm0
+; AVX512VLBW-NEXT:    vpsubb %ymm2, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512VLBW-NEXT:    vpmullw %zmm2, %zmm0, %zmm0
+; AVX512VLBW-NEXT:    vpmovwb %zmm0, %ymm0
+; AVX512VLBW-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    retq
+  %a1 = load <32 x i8>, <32 x i8>* %a1_addr
+  %t3 = icmp sgt <32 x i8> %a1, %a2 ; signed
+  %t4 = select <32 x i1> %t3, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <32 x i1> %t3, <32 x i8> %a2, <32 x i8> %a1
+  %t6 = select <32 x i1> %t3, <32 x i8> %a1, <32 x i8> %a2
+  %t7 = sub <32 x i8> %t6, %t5
+  %t8 = lshr <32 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul nsw <32 x i8> %t8, %t4 ; signed
+  %a10 = add nsw <32 x i8> %t9, %a1 ; signed
+  ret <32 x i8> %a10
+}
+
+define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, <32 x i8>* %a2_addr) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i8_signed_reg_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm3, %xmm1, %xmm8
+; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm2, %xmm0, %xmm5
+; AVX1-FALLBACK-NEXT:    vpminsb %xmm3, %xmm1, %xmm6
+; AVX1-FALLBACK-NEXT:    vpminsb %xmm2, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm3, %xmm1, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm6, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm2, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm7, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255]
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm8, %xmm5
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm1, %xmm3, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i8_signed_reg_mem:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX2-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm3
+; AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX2-NEXT:    vpmullw %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i8_signed_reg_mem:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm8
+; XOP-FALLBACK-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm5
+; XOP-FALLBACK-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
+; XOP-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm7
+; XOP-FALLBACK-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
+; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubb %xmm7, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm4, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm8, %xmm6
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm6, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm4, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; XOP-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i8_signed_reg_mem:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm8
+; XOPAVX1-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm5
+; XOPAVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
+; XOPAVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm7
+; XOPAVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT:    vpsubb %xmm7, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOPAVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOPAVX1-NEXT:    vpperm %xmm5, %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; XOPAVX1-NEXT:    vpor %xmm7, %xmm8, %xmm6
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpperm %xmm5, %xmm4, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; XOPAVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i8_signed_reg_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512F-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpminsb %ymm1, %ymm0, %ymm3
+; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec256_i8_signed_reg_mem:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm1, %ymm0, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm4, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i8_signed_reg_mem:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtb %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsb %ymm1, %ymm0, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsubb %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512BW-FALLBACK-NEXT:    vpmullw %zmm2, %zmm1, %zmm1
+; AVX512BW-FALLBACK-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec256_i8_signed_reg_mem:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa (%rdi), %ymm1
+; AVX512VLBW-NEXT:    vpcmpgtb %ymm1, %ymm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu8 %ymm2, %ymm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsb %ymm1, %ymm0, %ymm2
+; AVX512VLBW-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512VLBW-NEXT:    vpsubb %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512VLBW-NEXT:    vpmullw %zmm2, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VLBW-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+  %a2 = load <32 x i8>, <32 x i8>* %a2_addr
+  %t3 = icmp sgt <32 x i8> %a1, %a2 ; signed
+  %t4 = select <32 x i1> %t3, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <32 x i1> %t3, <32 x i8> %a2, <32 x i8> %a1
+  %t6 = select <32 x i1> %t3, <32 x i8> %a1, <32 x i8> %a2
+  %t7 = sub <32 x i8> %t6, %t5
+  %t8 = lshr <32 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul nsw <32 x i8> %t8, %t4 ; signed
+  %a10 = add nsw <32 x i8> %t9, %a1 ; signed
+  ret <32 x i8> %a10
+}
+
+define <32 x i8> @vec256_i8_signed_mem_mem(<32 x i8>* %a1_addr, <32 x i8>* %a2_addr) nounwind {
+; AVX1-FALLBACK-LABEL: vec256_i8_signed_mem_mem:
+; AVX1-FALLBACK:       # %bb.0:
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm2
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm3, %xmm1, %xmm8
+; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm2, %xmm0, %xmm5
+; AVX1-FALLBACK-NEXT:    vpminsb %xmm3, %xmm1, %xmm6
+; AVX1-FALLBACK-NEXT:    vpminsb %xmm2, %xmm0, %xmm7
+; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm3, %xmm1, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm6, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm2, %xmm0, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsubb %xmm7, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255]
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm4, %xmm2, %xmm2
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm8, %xmm5
+; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm4, %xmm4
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpackuswb %xmm4, %xmm3, %xmm3
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm1, %xmm3, %xmm1
+; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-FALLBACK-NEXT:    retq
+;
+; AVX2-LABEL: vec256_i8_signed_mem_mem:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX2-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX2-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm2
+; AVX2-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT:    vpminsb %ymm1, %ymm0, %ymm3
+; AVX2-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX2-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX2-NEXT:    vpmullw %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    retq
+;
+; XOP-FALLBACK-LABEL: vec256_i8_signed_mem_mem:
+; XOP-FALLBACK:       # %bb.0:
+; XOP-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm0
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm1
+; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
+; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-FALLBACK-NEXT:    vpcomgtb %xmm1, %xmm3, %xmm8
+; XOP-FALLBACK-NEXT:    vpcomgtb %xmm0, %xmm2, %xmm5
+; XOP-FALLBACK-NEXT:    vpminsb %xmm1, %xmm3, %xmm6
+; XOP-FALLBACK-NEXT:    vpminsb %xmm0, %xmm2, %xmm7
+; XOP-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm3, %xmm1
+; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpmaxsb %xmm0, %xmm2, %xmm0
+; XOP-FALLBACK-NEXT:    vpsubb %xmm7, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm4, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm8, %xmm6
+; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; XOP-FALLBACK-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; XOP-FALLBACK-NEXT:    vpmullw %xmm6, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm4, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
+; XOP-FALLBACK-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
+; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-FALLBACK-NEXT:    retq
+;
+; XOPAVX1-LABEL: vec256_i8_signed_mem_mem:
+; XOPAVX1:       # %bb.0:
+; XOPAVX1-NEXT:    vmovdqa (%rsi), %xmm0
+; XOPAVX1-NEXT:    vmovdqa 16(%rsi), %xmm1
+; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm2
+; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOPAVX1-NEXT:    vpcomgtb %xmm1, %xmm3, %xmm8
+; XOPAVX1-NEXT:    vpcomgtb %xmm0, %xmm2, %xmm5
+; XOPAVX1-NEXT:    vpminsb %xmm1, %xmm3, %xmm6
+; XOPAVX1-NEXT:    vpminsb %xmm0, %xmm2, %xmm7
+; XOPAVX1-NEXT:    vpmaxsb %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpmaxsb %xmm0, %xmm2, %xmm0
+; XOPAVX1-NEXT:    vpsubb %xmm7, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOPAVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm6, %xmm4
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOPAVX1-NEXT:    vpperm %xmm5, %xmm4, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; XOPAVX1-NEXT:    vpor %xmm7, %xmm8, %xmm6
+; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
+; XOPAVX1-NEXT:    vpmullw %xmm7, %xmm4, %xmm4
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; XOPAVX1-NEXT:    vpmullw %xmm6, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpperm %xmm5, %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
+; XOPAVX1-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT:    retq
+;
+; AVX512F-LABEL: vec256_i8_signed_mem_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512F-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm2
+; AVX512F-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT:    vpminsb %ymm1, %ymm0, %ymm3
+; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512F-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec256_i8_signed_mem_mem:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpor {{.*}}(%rip), %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm1, %ymm0, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm4, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-FALLBACK-LABEL: vec256_i8_signed_mem_mem:
+; AVX512BW-FALLBACK:       # %bb.0:
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512BW-FALLBACK-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpcmpgtb %zmm1, %zmm0, %k1
+; AVX512BW-FALLBACK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-FALLBACK-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-FALLBACK-NEXT:    vpminsb %ymm1, %ymm0, %ymm2
+; AVX512BW-FALLBACK-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsubb %ymm2, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512BW-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512BW-FALLBACK-NEXT:    vpmullw %zmm2, %zmm1, %zmm1
+; AVX512BW-FALLBACK-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512BW-FALLBACK-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512BW-FALLBACK-NEXT:    retq
+;
+; AVX512VLBW-LABEL: vec256_i8_signed_mem_mem:
+; AVX512VLBW:       # %bb.0:
+; AVX512VLBW-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512VLBW-NEXT:    vmovdqa (%rsi), %ymm1
+; AVX512VLBW-NEXT:    vpcmpgtb %ymm1, %ymm0, %k1
+; AVX512VLBW-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; AVX512VLBW-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VLBW-NEXT:    vmovdqu8 %ymm2, %ymm3 {%k1}
+; AVX512VLBW-NEXT:    vpminsb %ymm1, %ymm0, %ymm2
+; AVX512VLBW-NEXT:    vpmaxsb %ymm1, %ymm0, %ymm1
+; AVX512VLBW-NEXT:    vpsubb %ymm2, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
+; AVX512VLBW-NEXT:    vpmovzxbw {{.*#+}} zmm2 = ymm3[0],zero,ymm3[1],zero,ymm3[2],zero,ymm3[3],zero,ymm3[4],zero,ymm3[5],zero,ymm3[6],zero,ymm3[7],zero,ymm3[8],zero,ymm3[9],zero,ymm3[10],zero,ymm3[11],zero,ymm3[12],zero,ymm3[13],zero,ymm3[14],zero,ymm3[15],zero,ymm3[16],zero,ymm3[17],zero,ymm3[18],zero,ymm3[19],zero,ymm3[20],zero,ymm3[21],zero,ymm3[22],zero,ymm3[23],zero,ymm3[24],zero,ymm3[25],zero,ymm3[26],zero,ymm3[27],zero,ymm3[28],zero,ymm3[29],zero,ymm3[30],zero,ymm3[31],zero
+; AVX512VLBW-NEXT:    vpmullw %zmm2, %zmm1, %zmm1
+; AVX512VLBW-NEXT:    vpmovwb %zmm1, %ymm1
+; AVX512VLBW-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
+; AVX512VLBW-NEXT:    retq
+  %a1 = load <32 x i8>, <32 x i8>* %a1_addr
+  %a2 = load <32 x i8>, <32 x i8>* %a2_addr
+  %t3 = icmp sgt <32 x i8> %a1, %a2 ; signed
+  %t4 = select <32 x i1> %t3, <32 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <32 x i1> %t3, <32 x i8> %a2, <32 x i8> %a1
+  %t6 = select <32 x i1> %t3, <32 x i8> %a1, <32 x i8> %a2
+  %t7 = sub <32 x i8> %t6, %t5
+  %t8 = lshr <32 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul nsw <32 x i8> %t8, %t4 ; signed
+  %a10 = add nsw <32 x i8> %t9, %a1 ; signed
+  ret <32 x i8> %a10
+}

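(For reference between the two added files: every test above and below follows the
same scalar shape as the C++2a std::midpoint()-style pattern named in the log --
compare, select +/-1, take min/max, subtract, logical-shift right by one, multiply
by the sign, and add back the first operand. A minimal C++ sketch of that pattern,
for illustration only; the helper name and casts are assumptions here, not the
libc++ implementation:

    #include <cstdint>

    // Overflow-safe integer midpoint, rounding towards a1.
    // Each step mirrors the IR above: %t3..%t8, %t9, %a10.
    int32_t midpoint_pattern(int32_t a1, int32_t a2) {
      bool gt      = a1 > a2;             // %t3: icmp sgt
      int32_t sign = gt ? -1 : 1;         // %t4: select of +/-1
      int32_t lo   = gt ? a2 : a1;        // %t5: min
      int32_t hi   = gt ? a1 : a2;        // %t6: max
      // Subtract in unsigned so the difference wraps instead of
      // overflowing, matching the plain 'sub' + 'lshr 1' in the IR.
      uint32_t half = (uint32_t(hi) - uint32_t(lo)) >> 1; // %t7, %t8
      return a1 + int32_t(half) * sign;   // %t9, %a10
    }

E.g. midpoint_pattern(2, 8) == 5 and midpoint_pattern(8, 2) == 5, rounding
towards the first argument; the vector tests are the elementwise analogue.)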
Added: llvm/trunk/test/CodeGen/X86/midpoint-int-vec-512.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/midpoint-int-vec-512.ll?rev=355436&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/midpoint-int-vec-512.ll (added)
+++ llvm/trunk/test/CodeGen/X86/midpoint-int-vec-512.ll Tue Mar  5 12:18:47 2019
@@ -0,0 +1,1299 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=ALL,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VL,AVX512VL-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512,AVX512BW,AVX512BW-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VL,AVX512BW,AVX512VLBW
+
+; These test cases are inspired by C++2a std::midpoint().
+; See https://bugs.llvm.org/show_bug.cgi?id=40965
+
+; Using 512-bit vector regs.
+
+; ---------------------------------------------------------------------------- ;
+; 32-bit width. 512 / 32 = 16 elts.
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define <16 x i32> @vec512_i32_signed_reg_reg(<16 x i32> %a1, <16 x i32> %a2) nounwind {
+; ALL-LABEL: vec512_i32_signed_reg_reg:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpminsd %zmm1, %zmm0, %zmm2
+; ALL-NEXT:    vpmaxsd %zmm1, %zmm0, %zmm1
+; ALL-NEXT:    vpsubd %zmm2, %zmm1, %zmm1
+; ALL-NEXT:    vpsrld $1, %zmm1, %zmm1
+; ALL-NEXT:    vpmulld %zmm1, %zmm1, %zmm1
+; ALL-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+  %t3 = icmp sgt <16 x i32> %a1, %a2 ; signed
+  %t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
+  %t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
+  %t7 = sub <16 x i32> %t6, %t5
+  %t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul nsw <16 x i32> %t16, %t16 ; signed
+  %a10 = add nsw <16 x i32> %t9, %a1 ; signed
+  ret <16 x i32> %a10
+}
+
+define <16 x i32> @vec512_i32_unsigned_reg_reg(<16 x i32> %a1, <16 x i32> %a2) nounwind {
+; ALL-LABEL: vec512_i32_unsigned_reg_reg:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpminud %zmm1, %zmm0, %zmm2
+; ALL-NEXT:    vpmaxud %zmm1, %zmm0, %zmm1
+; ALL-NEXT:    vpsubd %zmm2, %zmm1, %zmm1
+; ALL-NEXT:    vpsrld $1, %zmm1, %zmm1
+; ALL-NEXT:    vpmulld %zmm1, %zmm1, %zmm1
+; ALL-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+  %t3 = icmp ugt <16 x i32> %a1, %a2
+  %t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
+  %t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
+  %t7 = sub <16 x i32> %t6, %t5
+  %t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul <16 x i32> %t16, %t16
+  %a10 = add <16 x i32> %t9, %a1
+  ret <16 x i32> %a10
+}
+
+; Values are loaded. Only check signed case.
+
+define <16 x i32> @vec512_i32_signed_mem_reg(<16 x i32>* %a1_addr, <16 x i32> %a2) nounwind {
+; ALL-LABEL: vec512_i32_signed_mem_reg:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vmovdqa64 (%rdi), %zmm1
+; ALL-NEXT:    vpminsd %zmm0, %zmm1, %zmm2
+; ALL-NEXT:    vpmaxsd %zmm0, %zmm1, %zmm0
+; ALL-NEXT:    vpsubd %zmm2, %zmm0, %zmm0
+; ALL-NEXT:    vpsrld $1, %zmm0, %zmm0
+; ALL-NEXT:    vpmulld %zmm0, %zmm0, %zmm0
+; ALL-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
+; ALL-NEXT:    retq
+  %a1 = load <16 x i32>, <16 x i32>* %a1_addr
+  %t3 = icmp sgt <16 x i32> %a1, %a2 ; signed
+  %t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
+  %t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
+  %t7 = sub <16 x i32> %t6, %t5
+  %t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul nsw <16 x i32> %t16, %t16 ; signed
+  %a10 = add nsw <16 x i32> %t9, %a1 ; signed
+  ret <16 x i32> %a10
+}
+
+define <16 x i32> @vec512_i32_signed_reg_mem(<16 x i32> %a1, <16 x i32>* %a2_addr) nounwind {
+; ALL-LABEL: vec512_i32_signed_reg_mem:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vmovdqa64 (%rdi), %zmm1
+; ALL-NEXT:    vpminsd %zmm1, %zmm0, %zmm2
+; ALL-NEXT:    vpmaxsd %zmm1, %zmm0, %zmm1
+; ALL-NEXT:    vpsubd %zmm2, %zmm1, %zmm1
+; ALL-NEXT:    vpsrld $1, %zmm1, %zmm1
+; ALL-NEXT:    vpmulld %zmm1, %zmm1, %zmm1
+; ALL-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+  %a2 = load <16 x i32>, <16 x i32>* %a2_addr
+  %t3 = icmp sgt <16 x i32> %a1, %a2 ; signed
+  %t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
+  %t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
+  %t7 = sub <16 x i32> %t6, %t5
+  %t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul nsw <16 x i32> %t16, %t16 ; signed
+  %a10 = add nsw <16 x i32> %t9, %a1 ; signed
+  ret <16 x i32> %a10
+}
+
+define <16 x i32> @vec512_i32_signed_mem_mem(<16 x i32>* %a1_addr, <16 x i32>* %a2_addr) nounwind {
+; ALL-LABEL: vec512_i32_signed_mem_mem:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vmovdqa64 (%rdi), %zmm0
+; ALL-NEXT:    vmovdqa64 (%rsi), %zmm1
+; ALL-NEXT:    vpminsd %zmm1, %zmm0, %zmm2
+; ALL-NEXT:    vpmaxsd %zmm1, %zmm0, %zmm1
+; ALL-NEXT:    vpsubd %zmm2, %zmm1, %zmm1
+; ALL-NEXT:    vpsrld $1, %zmm1, %zmm1
+; ALL-NEXT:    vpmulld %zmm1, %zmm1, %zmm1
+; ALL-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+  %a1 = load <16 x i32>, <16 x i32>* %a1_addr
+  %a2 = load <16 x i32>, <16 x i32>* %a2_addr
+  %t3 = icmp sgt <16 x i32> %a1, %a2 ; signed
+  %t4 = select <16 x i1> %t3, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t5 = select <16 x i1> %t3, <16 x i32> %a2, <16 x i32> %a1
+  %t6 = select <16 x i1> %t3, <16 x i32> %a1, <16 x i32> %a2
+  %t7 = sub <16 x i32> %t6, %t5
+  %t16 = lshr <16 x i32> %t7, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+  %t9 = mul nsw <16 x i32> %t16, %t16 ; signed
+  %a10 = add nsw <16 x i32> %t9, %a1 ; signed
+  ret <16 x i32> %a10
+}
+
+; ---------------------------------------------------------------------------- ;
+; 64-bit width. 512 / 64 = 8 elts.
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define <8 x i64> @vec512_i64_signed_reg_reg(<8 x i64> %a1, <8 x i64> %a2) nounwind {
+; ALL-LABEL: vec512_i64_signed_reg_reg:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; ALL-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; ALL-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
+; ALL-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; ALL-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; ALL-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; ALL-NEXT:    vpsubq %zmm2, %zmm1, %zmm1
+; ALL-NEXT:    vpsrlq $1, %zmm1, %zmm1
+; ALL-NEXT:    vpsrlq $32, %zmm3, %zmm2
+; ALL-NEXT:    vpmuludq %zmm2, %zmm1, %zmm2
+; ALL-NEXT:    vpsrlq $32, %zmm1, %zmm4
+; ALL-NEXT:    vpmuludq %zmm3, %zmm4, %zmm4
+; ALL-NEXT:    vpaddq %zmm4, %zmm2, %zmm2
+; ALL-NEXT:    vpsllq $32, %zmm2, %zmm2
+; ALL-NEXT:    vpmuludq %zmm3, %zmm1, %zmm1
+; ALL-NEXT:    vpaddq %zmm0, %zmm2, %zmm0
+; ALL-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+  %t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
+  %t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+  %t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
+  %t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
+  %t7 = sub <8 x i64> %t6, %t5
+  %t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+  %t9 = mul nsw <8 x i64> %t8, %t4 ; signed
+  %a10 = add nsw <8 x i64> %t9, %a1 ; signed
+  ret <8 x i64> %a10
+}
+
+define <8 x i64> @vec512_i64_unsigned_reg_reg(<8 x i64> %a1, <8 x i64> %a2) nounwind {
+; ALL-LABEL: vec512_i64_unsigned_reg_reg:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vpcmpnleuq %zmm1, %zmm0, %k1
+; ALL-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; ALL-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
+; ALL-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; ALL-NEXT:    vpminuq %zmm1, %zmm0, %zmm2
+; ALL-NEXT:    vpmaxuq %zmm1, %zmm0, %zmm1
+; ALL-NEXT:    vpsubq %zmm2, %zmm1, %zmm1
+; ALL-NEXT:    vpsrlq $1, %zmm1, %zmm1
+; ALL-NEXT:    vpsrlq $32, %zmm3, %zmm2
+; ALL-NEXT:    vpmuludq %zmm2, %zmm1, %zmm2
+; ALL-NEXT:    vpsrlq $32, %zmm1, %zmm4
+; ALL-NEXT:    vpmuludq %zmm3, %zmm4, %zmm4
+; ALL-NEXT:    vpaddq %zmm4, %zmm2, %zmm2
+; ALL-NEXT:    vpsllq $32, %zmm2, %zmm2
+; ALL-NEXT:    vpmuludq %zmm3, %zmm1, %zmm1
+; ALL-NEXT:    vpaddq %zmm0, %zmm2, %zmm0
+; ALL-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+  %t3 = icmp ugt <8 x i64> %a1, %a2
+  %t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+  %t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
+  %t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
+  %t7 = sub <8 x i64> %t6, %t5
+  %t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+  %t9 = mul <8 x i64> %t8, %t4
+  %a10 = add <8 x i64> %t9, %a1
+  ret <8 x i64> %a10
+}
+
+; Values are loaded. Only check signed case.
+
+define <8 x i64> @vec512_i64_signed_mem_reg(<8 x i64>* %a1_addr, <8 x i64> %a2) nounwind {
+; ALL-LABEL: vec512_i64_signed_mem_reg:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vmovdqa64 (%rdi), %zmm1
+; ALL-NEXT:    vpcmpgtq %zmm0, %zmm1, %k1
+; ALL-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; ALL-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
+; ALL-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; ALL-NEXT:    vpminsq %zmm0, %zmm1, %zmm2
+; ALL-NEXT:    vpmaxsq %zmm0, %zmm1, %zmm0
+; ALL-NEXT:    vpsubq %zmm2, %zmm0, %zmm0
+; ALL-NEXT:    vpsrlq $1, %zmm0, %zmm0
+; ALL-NEXT:    vpsrlq $32, %zmm3, %zmm2
+; ALL-NEXT:    vpmuludq %zmm2, %zmm0, %zmm2
+; ALL-NEXT:    vpsrlq $32, %zmm0, %zmm4
+; ALL-NEXT:    vpmuludq %zmm3, %zmm4, %zmm4
+; ALL-NEXT:    vpaddq %zmm4, %zmm2, %zmm2
+; ALL-NEXT:    vpsllq $32, %zmm2, %zmm2
+; ALL-NEXT:    vpmuludq %zmm3, %zmm0, %zmm0
+; ALL-NEXT:    vpaddq %zmm1, %zmm2, %zmm1
+; ALL-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
+; ALL-NEXT:    retq
+  %a1 = load <8 x i64>, <8 x i64>* %a1_addr
+  %t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
+  %t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+  %t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
+  %t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
+  %t7 = sub <8 x i64> %t6, %t5
+  %t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+  %t9 = mul nsw <8 x i64> %t8, %t4 ; signed
+  %a10 = add nsw <8 x i64> %t9, %a1 ; signed
+  ret <8 x i64> %a10
+}
+
+define <8 x i64> @vec512_i64_signed_reg_mem(<8 x i64> %a1, <8 x i64>* %a2_addr) nounwind {
+; ALL-LABEL: vec512_i64_signed_reg_mem:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vmovdqa64 (%rdi), %zmm1
+; ALL-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; ALL-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; ALL-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
+; ALL-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; ALL-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; ALL-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; ALL-NEXT:    vpsubq %zmm2, %zmm1, %zmm1
+; ALL-NEXT:    vpsrlq $1, %zmm1, %zmm1
+; ALL-NEXT:    vpsrlq $32, %zmm3, %zmm2
+; ALL-NEXT:    vpmuludq %zmm2, %zmm1, %zmm2
+; ALL-NEXT:    vpsrlq $32, %zmm1, %zmm4
+; ALL-NEXT:    vpmuludq %zmm3, %zmm4, %zmm4
+; ALL-NEXT:    vpaddq %zmm4, %zmm2, %zmm2
+; ALL-NEXT:    vpsllq $32, %zmm2, %zmm2
+; ALL-NEXT:    vpmuludq %zmm3, %zmm1, %zmm1
+; ALL-NEXT:    vpaddq %zmm0, %zmm2, %zmm0
+; ALL-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+  %a2 = load <8 x i64>, <8 x i64>* %a2_addr
+  %t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
+  %t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+  %t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
+  %t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
+  %t7 = sub <8 x i64> %t6, %t5
+  %t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+  %t9 = mul nsw <8 x i64> %t8, %t4 ; signed
+  %a10 = add nsw <8 x i64> %t9, %a1 ; signed
+  ret <8 x i64> %a10
+}
+
+define <8 x i64> @vec512_i64_signed_mem_mem(<8 x i64>* %a1_addr, <8 x i64>* %a2_addr) nounwind {
+; ALL-LABEL: vec512_i64_signed_mem_mem:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vmovdqa64 (%rdi), %zmm0
+; ALL-NEXT:    vmovdqa64 (%rsi), %zmm1
+; ALL-NEXT:    vpcmpgtq %zmm1, %zmm0, %k1
+; ALL-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; ALL-NEXT:    vpbroadcastq {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1]
+; ALL-NEXT:    vmovdqa64 %zmm2, %zmm3 {%k1}
+; ALL-NEXT:    vpminsq %zmm1, %zmm0, %zmm2
+; ALL-NEXT:    vpmaxsq %zmm1, %zmm0, %zmm1
+; ALL-NEXT:    vpsubq %zmm2, %zmm1, %zmm1
+; ALL-NEXT:    vpsrlq $1, %zmm1, %zmm1
+; ALL-NEXT:    vpsrlq $32, %zmm3, %zmm2
+; ALL-NEXT:    vpmuludq %zmm2, %zmm1, %zmm2
+; ALL-NEXT:    vpsrlq $32, %zmm1, %zmm4
+; ALL-NEXT:    vpmuludq %zmm3, %zmm4, %zmm4
+; ALL-NEXT:    vpaddq %zmm4, %zmm2, %zmm2
+; ALL-NEXT:    vpsllq $32, %zmm2, %zmm2
+; ALL-NEXT:    vpmuludq %zmm3, %zmm1, %zmm1
+; ALL-NEXT:    vpaddq %zmm0, %zmm2, %zmm0
+; ALL-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
+; ALL-NEXT:    retq
+  %a1 = load <8 x i64>, <8 x i64>* %a1_addr
+  %a2 = load <8 x i64>, <8 x i64>* %a2_addr
+  %t3 = icmp sgt <8 x i64> %a1, %a2 ; signed
+  %t4 = select <8 x i1> %t3, <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, <8 x i64> <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+  %t5 = select <8 x i1> %t3, <8 x i64> %a2, <8 x i64> %a1
+  %t6 = select <8 x i1> %t3, <8 x i64> %a1, <8 x i64> %a2
+  %t7 = sub <8 x i64> %t6, %t5
+  %t8 = lshr <8 x i64> %t7, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+  %t9 = mul nsw <8 x i64> %t8, %t4 ; signed
+  %a10 = add nsw <8 x i64> %t9, %a1 ; signed
+  ret <8 x i64> %a10
+}
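+
+; The mem variants above only fold the operand load(s); the arithmetic
+; lowering is the same as in the reg_reg cases.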
+
+; ---------------------------------------------------------------------------- ;
+; 16-bit width. 512 / 16 = 32 elts.
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define <32 x i16> @vec512_i16_signed_reg_reg(<32 x i16> %a1, <32 x i16> %a2) nounwind {
+; AVX512F-LABEL: vec512_i16_signed_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpcmpgtw %ymm3, %ymm1, %ymm6
+; AVX512F-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512F-NEXT:    vpminsw %ymm2, %ymm0, %ymm6
+; AVX512F-NEXT:    vpminsw %ymm3, %ymm1, %ymm7
+; AVX512F-NEXT:    vpmaxsw %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpsubw %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmaxsw %ymm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpsubw %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpaddw %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_reg_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm3, %ymm1, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm2, %ymm0, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm3, %ymm1, %ymm7
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm2, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm6, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm3, %ymm1, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm7, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm5, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-LABEL: vec512_i16_signed_reg_reg:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpcmpgtw %zmm1, %zmm0, %k1
+; AVX512BW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpminsw %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpmaxsw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsubw %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpmullw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpaddw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+  %t3 = icmp sgt <32 x i16> %a1, %a2 ; signed
+  %t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
+  %t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
+  %t7 = sub <32 x i16> %t6, %t5
+  %t8 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul nsw <32 x i16> %t8, %t4 ; signed
+  %a10 = add nsw <32 x i16> %t9, %a1 ; signed
+  ret <32 x i16> %a10
+}
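+
+; Without AVX512BW there are no 512-bit word-element ops, so the AVX512F and
+; AVX512VL fallbacks above process the vector as two 256-bit halves and build
+; the -1/1 multiplier with vpcmpgtw+vpor instead of a mask register.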
+
+define <32 x i16> @vec512_i16_unsigned_reg_reg(<32 x i16> %a1, <32 x i16> %a2) nounwind {
+; AVX512F-LABEL: vec512_i16_unsigned_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpminuw %ymm2, %ymm0, %ymm4
+; AVX512F-NEXT:    vpcmpeqw %ymm4, %ymm0, %ymm5
+; AVX512F-NEXT:    vpternlogq $15, %zmm5, %zmm5, %zmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT:    vpor %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT:    vpminuw %ymm3, %ymm1, %ymm7
+; AVX512F-NEXT:    vpcmpeqw %ymm7, %ymm1, %ymm8
+; AVX512F-NEXT:    vpternlogq $15, %zmm8, %zmm8, %zmm8
+; AVX512F-NEXT:    vpor %ymm6, %ymm8, %ymm6
+; AVX512F-NEXT:    vpmaxuw %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpmaxuw %ymm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpsubw %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsubw %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpaddw %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec512_i16_unsigned_reg_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vpminuw %ymm2, %ymm0, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vpcmpeqw %ymm4, %ymm0, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpternlogq $15, %ymm5, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm6, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpminuw %ymm3, %ymm1, %ymm7
+; AVX512VL-FALLBACK-NEXT:    vpcmpeqw %ymm7, %ymm1, %ymm8
+; AVX512VL-FALLBACK-NEXT:    vpternlogq $15, %ymm8, %ymm8, %ymm8
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm6, %ymm8, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpmaxuw %ymm2, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpmaxuw %ymm3, %ymm1, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm7, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm4, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm5, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm6, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-LABEL: vec512_i16_unsigned_reg_reg:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpcmpnleuw %zmm1, %zmm0, %k1
+; AVX512BW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpminuw %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpmaxuw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsubw %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpmullw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpaddw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+  %t3 = icmp ugt <32 x i16> %a1, %a2
+  %t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
+  %t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
+  %t7 = sub <32 x i16> %t6, %t5
+  %t8 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul <32 x i16> %t8, %t4
+  %a10 = add <32 x i16> %t9, %a1
+  ret <32 x i16> %a10
+}
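+
+; Note how the non-BW paths synthesize the unsigned comparison: with no
+; unsigned vpcmpgt, they use the identity (sketch)
+;   ugt(a, b) == not(a == umin(a, b))
+; where the "not" is a vpternlogq with immediate 15 (bitwise NOT of one
+; source); immediate 255 is similarly used to materialize all-ones.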
+
+; Values are loaded. Only check signed case.
+
+define <32 x i16> @vec512_i16_signed_mem_reg(<32 x i16>* %a1_addr, <32 x i16> %a2) nounwind {
+; AVX512F-LABEL: vec512_i16_signed_mem_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512F-NEXT:    vpcmpgtw %ymm0, %ymm2, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpcmpgtw %ymm1, %ymm3, %ymm6
+; AVX512F-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512F-NEXT:    vpminsw %ymm0, %ymm2, %ymm6
+; AVX512F-NEXT:    vpminsw %ymm1, %ymm3, %ymm7
+; AVX512F-NEXT:    vpmaxsw %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpsubw %ymm6, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmaxsw %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    vpsubw %ymm7, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT:    vpaddw %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddw %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_mem_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm0, %ymm2, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm1, %ymm3, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm0, %ymm2, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm1, %ymm3, %ymm7
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm6, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm7, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm4, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm5, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm2, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-LABEL: vec512_i16_signed_mem_reg:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512BW-NEXT:    vpcmpgtw %zmm0, %zmm1, %k1
+; AVX512BW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpminsw %zmm0, %zmm1, %zmm2
+; AVX512BW-NEXT:    vpmaxsw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vpsubw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpsrlw $1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpmullw %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpaddw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+  %a1 = load <32 x i16>, <32 x i16>* %a1_addr
+  %t3 = icmp sgt <32 x i16> %a1, %a2 ; signed
+  %t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
+  %t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
+  %t7 = sub <32 x i16> %t6, %t5
+  %t8 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul nsw <32 x i16> %t8, %t4 ; signed
+  %a10 = add nsw <32 x i16> %t9, %a1 ; signed
+  ret <32 x i16> %a10
+}
+
+define <32 x i16> @vec512_i16_signed_reg_mem(<32 x i16> %a1, <32 x i16>* %a2_addr) nounwind {
+; AVX512F-LABEL: vec512_i16_signed_reg_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512F-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpcmpgtw %ymm3, %ymm1, %ymm6
+; AVX512F-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512F-NEXT:    vpminsw %ymm2, %ymm0, %ymm6
+; AVX512F-NEXT:    vpminsw %ymm3, %ymm1, %ymm7
+; AVX512F-NEXT:    vpmaxsw %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpsubw %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmaxsw %ymm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpsubw %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpaddw %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_reg_mem:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm3, %ymm1, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm2, %ymm0, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm3, %ymm1, %ymm7
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm2, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm6, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm3, %ymm1, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm7, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm5, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-LABEL: vec512_i16_signed_reg_mem:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512BW-NEXT:    vpcmpgtw %zmm1, %zmm0, %k1
+; AVX512BW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpminsw %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpmaxsw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsubw %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpmullw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpaddw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+  %a2 = load <32 x i16>, <32 x i16>* %a2_addr
+  %t3 = icmp sgt <32 x i16> %a1, %a2 ; signed
+  %t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
+  %t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
+  %t7 = sub <32 x i16> %t6, %t5
+  %t8 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul nsw <32 x i16> %t8, %t4 ; signed
+  %a10 = add nsw <32 x i16> %t9, %a1 ; signed
+  ret <32 x i16> %a10
+}
+
+define <32 x i16> @vec512_i16_signed_mem_mem(<32 x i16>* %a1_addr, <32 x i16>* %a2_addr) nounwind {
+; AVX512F-LABEL: vec512_i16_signed_mem_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX512F-NEXT:    vmovdqa (%rsi), %ymm2
+; AVX512F-NEXT:    vmovdqa 32(%rsi), %ymm3
+; AVX512F-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpcmpgtw %ymm3, %ymm1, %ymm6
+; AVX512F-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512F-NEXT:    vpminsw %ymm2, %ymm0, %ymm6
+; AVX512F-NEXT:    vpminsw %ymm3, %ymm1, %ymm7
+; AVX512F-NEXT:    vpmaxsw %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpsubw %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmaxsw %ymm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpsubw %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpaddw %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec512_i16_signed_mem_mem:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rsi), %ymm2
+; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rsi), %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm2, %ymm0, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtw %ymm3, %ymm1, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm2, %ymm0, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpminsw %ymm3, %ymm1, %ymm7
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm2, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm6, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpmaxsw %ymm3, %ymm1, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsubw %ymm7, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm5, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpaddw %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-LABEL: vec512_i16_signed_mem_mem:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm1
+; AVX512BW-NEXT:    vpcmpgtw %zmm1, %zmm0, %k1
+; AVX512BW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-NEXT:    vmovdqu16 %zmm2, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpminsw %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpmaxsw %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsubw %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpmullw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpaddw %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+  %a1 = load <32 x i16>, <32 x i16>* %a1_addr
+  %a2 = load <32 x i16>, <32 x i16>* %a2_addr
+  %t3 = icmp sgt <32 x i16> %a1, %a2 ; signed
+  %t4 = select <32 x i1> %t3, <32 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t5 = select <32 x i1> %t3, <32 x i16> %a2, <32 x i16> %a1
+  %t6 = select <32 x i1> %t3, <32 x i16> %a1, <32 x i16> %a2
+  %t7 = sub <32 x i16> %t6, %t5
+  %t8 = lshr <32 x i16> %t7, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  %t9 = mul nsw <32 x i16> %t8, %t4 ; signed
+  %a10 = add nsw <32 x i16> %t9, %a1 ; signed
+  ret <32 x i16> %a10
+}
+
+; ---------------------------------------------------------------------------- ;
+; 8-bit width. 512 / 8 = 64 elts.
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define <64 x i8> @vec512_i8_signed_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounwind {
+; AVX512F-LABEL: vec512_i8_signed_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpcmpgtb %ymm2, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpcmpgtb %ymm3, %ymm1, %ymm6
+; AVX512F-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512F-NEXT:    vpminsb %ymm2, %ymm0, %ymm6
+; AVX512F-NEXT:    vpminsb %ymm3, %ymm1, %ymm7
+; AVX512F-NEXT:    vpmaxsb %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpsubb %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmaxsb %ymm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpsubb %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpand %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512F-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11],ymm5[12],ymm0[12],ymm5[13],ymm0[13],ymm5[14],ymm0[14],ymm5[15],ymm0[15],ymm5[24],ymm0[24],ymm5[25],ymm0[25],ymm5[26],ymm0[26],ymm5[27],ymm0[27],ymm5[28],ymm0[28],ymm5[29],ymm0[29],ymm5[30],ymm0[30],ymm5[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[4],ymm0[4],ymm5[5],ymm0[5],ymm5[6],ymm0[6],ymm5[7],ymm0[7],ymm5[16],ymm0[16],ymm5[17],ymm0[17],ymm5[18],ymm0[18],ymm5[19],ymm0[19],ymm5[20],ymm0[20],ymm5[21],ymm0[21],ymm5[22],ymm0[22],ymm5[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT:    vpand %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT:    vpackuswb %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15],ymm4[24],ymm0[24],ymm4[25],ymm0[25],ymm4[26],ymm0[26],ymm4[27],ymm0[27],ymm4[28],ymm0[28],ymm4[29],ymm0[29],ymm4[30],ymm0[30],ymm4[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT:    vpand %ymm7, %ymm5, %ymm5
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[16],ymm0[16],ymm4[17],ymm0[17],ymm4[18],ymm0[18],ymm4[19],ymm0[19],ymm4[20],ymm0[20],ymm4[21],ymm0[21],ymm4[22],ymm0[22],ymm4[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT:    vpackuswb %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_reg_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm2, %ymm0, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm3, %ymm1, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm2, %ymm0, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm3, %ymm1, %ymm7
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm2, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm6, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm3, %ymm1, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm7, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm6, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11],ymm5[12],ymm0[12],ymm5[13],ymm0[13],ymm5[14],ymm0[14],ymm5[15],ymm0[15],ymm5[24],ymm0[24],ymm5[25],ymm0[25],ymm5[26],ymm0[26],ymm5[27],ymm0[27],ymm5[28],ymm0[28],ymm5[29],ymm0[29],ymm5[30],ymm0[30],ymm5[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm7, %ymm6, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[4],ymm0[4],ymm5[5],ymm0[5],ymm5[6],ymm0[6],ymm5[7],ymm0[7],ymm5[16],ymm0[16],ymm5[17],ymm0[17],ymm5[18],ymm0[18],ymm5[19],ymm0[19],ymm5[20],ymm0[20],ymm5[21],ymm0[21],ymm5[22],ymm0[22],ymm5[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm5, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm6, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15],ymm4[24],ymm0[24],ymm4[25],ymm0[25],ymm4[26],ymm0[26],ymm4[27],ymm0[27],ymm4[28],ymm0[28],ymm4[29],ymm0[29],ymm4[30],ymm0[30],ymm4[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm6, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[16],ymm0[16],ymm4[17],ymm0[17],ymm4[18],ymm0[18],ymm4[19],ymm0[19],ymm4[20],ymm0[20],ymm4[21],ymm0[21],ymm4[22],ymm0[22],ymm4[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm5, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-LABEL: vec512_i8_signed_reg_reg:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpcmpgtb %zmm1, %zmm0, %k1
+; AVX512BW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpminsb %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpmaxsb %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsubb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm3[8],zmm0[8],zmm3[9],zmm0[9],zmm3[10],zmm0[10],zmm3[11],zmm0[11],zmm3[12],zmm0[12],zmm3[13],zmm0[13],zmm3[14],zmm0[14],zmm3[15],zmm0[15],zmm3[24],zmm0[24],zmm3[25],zmm0[25],zmm3[26],zmm0[26],zmm3[27],zmm0[27],zmm3[28],zmm0[28],zmm3[29],zmm0[29],zmm3[30],zmm0[30],zmm3[31],zmm0[31],zmm3[40],zmm0[40],zmm3[41],zmm0[41],zmm3[42],zmm0[42],zmm3[43],zmm0[43],zmm3[44],zmm0[44],zmm3[45],zmm0[45],zmm3[46],zmm0[46],zmm3[47],zmm0[47],zmm3[56],zmm0[56],zmm3[57],zmm0[57],zmm3[58],zmm0[58],zmm3[59],zmm0[59],zmm3[60],zmm0[60],zmm3[61],zmm0[61],zmm3[62],zmm0[62],zmm3[63],zmm0[63]
+; AVX512BW-NEXT:    vpmullw %zmm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512BW-NEXT:    vpandq %zmm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55]
+; AVX512BW-NEXT:    vpmullw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpandq %zmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpaddb %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+  %t3 = icmp sgt <64 x i8> %a1, %a2 ; signed
+  %t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
+  %t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
+  %t7 = sub <64 x i8> %t6, %t5
+  %t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul nsw <64 x i8> %t8, %t4 ; signed
+  %a10 = add nsw <64 x i8> %t9, %a1 ; signed
+  ret <64 x i8> %a10
+}
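+
+; i8 needs the most legwork: there is no vector byte shift, so the srl-by-1
+; is done as vpsrlw followed by masking each byte with 0x7f, and there is no
+; vpmullb at all, so the byte multiply is widened to words (vpunpck{l,h}bw),
+; multiplied with vpmullw, truncated back with a 0xff mask and vpackuswb.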
+
+define <64 x i8> @vec512_i8_unsigned_reg_reg(<64 x i8> %a1, <64 x i8> %a2) nounwind {
+; AVX512F-LABEL: vec512_i8_unsigned_reg_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpminub %ymm2, %ymm0, %ymm4
+; AVX512F-NEXT:    vpcmpeqb %ymm4, %ymm0, %ymm5
+; AVX512F-NEXT:    vpternlogq $15, %zmm5, %zmm5, %zmm5
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT:    vpor %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT:    vpminub %ymm3, %ymm1, %ymm7
+; AVX512F-NEXT:    vpcmpeqb %ymm7, %ymm1, %ymm8
+; AVX512F-NEXT:    vpternlogq $15, %zmm8, %zmm8, %zmm8
+; AVX512F-NEXT:    vpor %ymm6, %ymm8, %ymm6
+; AVX512F-NEXT:    vpmaxub %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpmaxub %ymm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpsubb %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsubb %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512F-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm6[8],ymm0[8],ymm6[9],ymm0[9],ymm6[10],ymm0[10],ymm6[11],ymm0[11],ymm6[12],ymm0[12],ymm6[13],ymm0[13],ymm6[14],ymm0[14],ymm6[15],ymm0[15],ymm6[24],ymm0[24],ymm6[25],ymm0[25],ymm6[26],ymm0[26],ymm6[27],ymm0[27],ymm6[28],ymm0[28],ymm6[29],ymm0[29],ymm6[30],ymm0[30],ymm6[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpand %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm6[0],ymm0[0],ymm6[1],ymm0[1],ymm6[2],ymm0[2],ymm6[3],ymm0[3],ymm6[4],ymm0[4],ymm6[5],ymm0[5],ymm6[6],ymm0[6],ymm6[7],ymm0[7],ymm6[16],ymm0[16],ymm6[17],ymm0[17],ymm6[18],ymm0[18],ymm6[19],ymm0[19],ymm6[20],ymm0[20],ymm6[21],ymm0[21],ymm6[22],ymm0[22],ymm6[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpand %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT:    vpackuswb %ymm4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11],ymm5[12],ymm0[12],ymm5[13],ymm0[13],ymm5[14],ymm0[14],ymm5[15],ymm0[15],ymm5[24],ymm0[24],ymm5[25],ymm0[25],ymm5[26],ymm0[26],ymm5[27],ymm0[27],ymm5[28],ymm0[28],ymm5[29],ymm0[29],ymm5[30],ymm0[30],ymm5[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT:    vpand %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[4],ymm0[4],ymm5[5],ymm0[5],ymm5[6],ymm0[6],ymm5[7],ymm0[7],ymm5[16],ymm0[16],ymm5[17],ymm0[17],ymm5[18],ymm0[18],ymm5[19],ymm0[19],ymm5[20],ymm0[20],ymm5[21],ymm0[21],ymm5[22],ymm0[22],ymm5[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT:    vpackuswb %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec512_i8_unsigned_reg_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vpminub %ymm2, %ymm0, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vpcmpeqb %ymm4, %ymm0, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpternlogq $15, %ymm5, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm6, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpminub %ymm3, %ymm1, %ymm7
+; AVX512VL-FALLBACK-NEXT:    vpcmpeqb %ymm7, %ymm1, %ymm8
+; AVX512VL-FALLBACK-NEXT:    vpternlogq $15, %ymm8, %ymm8, %ymm8
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm6, %ymm8, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpmaxub %ymm2, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpmaxub %ymm3, %ymm1, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm7, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm4, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm4, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm4, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm6[8],ymm0[8],ymm6[9],ymm0[9],ymm6[10],ymm0[10],ymm6[11],ymm0[11],ymm6[12],ymm0[12],ymm6[13],ymm0[13],ymm6[14],ymm0[14],ymm6[15],ymm0[15],ymm6[24],ymm0[24],ymm6[25],ymm0[25],ymm6[26],ymm0[26],ymm6[27],ymm0[27],ymm6[28],ymm0[28],ymm6[29],ymm0[29],ymm6[30],ymm0[30],ymm6[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm7, %ymm4, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm4, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm6 = ymm6[0],ymm0[0],ymm6[1],ymm0[1],ymm6[2],ymm0[2],ymm6[3],ymm0[3],ymm6[4],ymm0[4],ymm6[5],ymm0[5],ymm6[6],ymm0[6],ymm6[7],ymm0[7],ymm6[16],ymm0[16],ymm6[17],ymm0[17],ymm6[18],ymm0[18],ymm6[19],ymm0[19],ymm6[20],ymm0[20],ymm6[21],ymm0[21],ymm6[22],ymm0[22],ymm6[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm6, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm4, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm4 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11],ymm5[12],ymm0[12],ymm5[13],ymm0[13],ymm5[14],ymm0[14],ymm5[15],ymm0[15],ymm5[24],ymm0[24],ymm5[25],ymm0[25],ymm5[26],ymm0[26],ymm5[27],ymm0[27],ymm5[28],ymm0[28],ymm5[29],ymm0[29],ymm5[30],ymm0[30],ymm5[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm6, %ymm4, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm4, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[4],ymm0[4],ymm5[5],ymm0[5],ymm5[6],ymm0[6],ymm5[7],ymm0[7],ymm5[16],ymm0[16],ymm5[17],ymm0[17],ymm5[18],ymm0[18],ymm5[19],ymm0[19],ymm5[20],ymm0[20],ymm5[21],ymm0[21],ymm5[22],ymm0[22],ymm5[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm5, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm4, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-LABEL: vec512_i8_unsigned_reg_reg:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpcmpnleub %zmm1, %zmm0, %k1
+; AVX512BW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpminub %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpmaxub %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsubb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm3[8],zmm0[8],zmm3[9],zmm0[9],zmm3[10],zmm0[10],zmm3[11],zmm0[11],zmm3[12],zmm0[12],zmm3[13],zmm0[13],zmm3[14],zmm0[14],zmm3[15],zmm0[15],zmm3[24],zmm0[24],zmm3[25],zmm0[25],zmm3[26],zmm0[26],zmm3[27],zmm0[27],zmm3[28],zmm0[28],zmm3[29],zmm0[29],zmm3[30],zmm0[30],zmm3[31],zmm0[31],zmm3[40],zmm0[40],zmm3[41],zmm0[41],zmm3[42],zmm0[42],zmm3[43],zmm0[43],zmm3[44],zmm0[44],zmm3[45],zmm0[45],zmm3[46],zmm0[46],zmm3[47],zmm0[47],zmm3[56],zmm0[56],zmm3[57],zmm0[57],zmm3[58],zmm0[58],zmm3[59],zmm0[59],zmm3[60],zmm0[60],zmm3[61],zmm0[61],zmm3[62],zmm0[62],zmm3[63],zmm0[63]
+; AVX512BW-NEXT:    vpmullw %zmm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512BW-NEXT:    vpandq %zmm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55]
+; AVX512BW-NEXT:    vpmullw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpandq %zmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpaddb %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+  %t3 = icmp ugt <64 x i8> %a1, %a2
+  %t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
+  %t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
+  %t7 = sub <64 x i8> %t6, %t5
+  %t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul <64 x i8> %t8, %t4
+  %a10 = add <64 x i8> %t9, %a1
+  ret <64 x i8> %a10
+}
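+
+; Same umin/pcmpeq/NOT trick as in the i16 unsigned case, just with byte
+; element ops (vpminub/vpcmpeqb); AVX512BW again gets away with a single
+; masked compare (vpcmpnleub).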
+
+; Values are loaded. Only check signed case.
+
+define <64 x i8> @vec512_i8_signed_mem_reg(<64 x i8>* %a1_addr, <64 x i8> %a2) nounwind {
+; AVX512F-LABEL: vec512_i8_signed_mem_reg:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512F-NEXT:    vpcmpgtb %ymm0, %ymm2, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpcmpgtb %ymm1, %ymm3, %ymm6
+; AVX512F-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512F-NEXT:    vpminsb %ymm0, %ymm2, %ymm6
+; AVX512F-NEXT:    vpminsb %ymm1, %ymm3, %ymm7
+; AVX512F-NEXT:    vpmaxsb %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpsubb %ymm6, %ymm0, %ymm0
+; AVX512F-NEXT:    vpmaxsb %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    vpsubb %ymm7, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpand %ymm6, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11],ymm5[12],ymm0[12],ymm5[13],ymm0[13],ymm5[14],ymm0[14],ymm5[15],ymm0[15],ymm5[24],ymm0[24],ymm5[25],ymm0[25],ymm5[26],ymm0[26],ymm5[27],ymm0[27],ymm5[28],ymm0[28],ymm5[29],ymm0[29],ymm5[30],ymm0[30],ymm5[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[4],ymm0[4],ymm5[5],ymm0[5],ymm5[6],ymm0[6],ymm5[7],ymm0[7],ymm5[16],ymm0[16],ymm5[17],ymm0[17],ymm5[18],ymm0[18],ymm5[19],ymm0[19],ymm5[20],ymm0[20],ymm5[21],ymm0[21],ymm5[22],ymm0[22],ymm5[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm7, %ymm1, %ymm1
+; AVX512F-NEXT:    vpackuswb %ymm6, %ymm1, %ymm1
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15],ymm4[24],ymm0[24],ymm4[25],ymm0[25],ymm4[26],ymm0[26],ymm4[27],ymm0[27],ymm4[28],ymm0[28],ymm4[29],ymm0[29],ymm4[30],ymm0[30],ymm4[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT:    vpand %ymm7, %ymm5, %ymm5
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[16],ymm0[16],ymm4[17],ymm0[17],ymm4[18],ymm0[18],ymm4[19],ymm0[19],ymm4[20],ymm0[20],ymm4[21],ymm0[21],ymm4[22],ymm0[22],ymm4[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm7, %ymm0, %ymm0
+; AVX512F-NEXT:    vpackuswb %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_mem_reg:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm0, %ymm2, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm1, %ymm3, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm0, %ymm2, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm1, %ymm3, %ymm7
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm6, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm7, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm6, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm6, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11],ymm5[12],ymm0[12],ymm5[13],ymm0[13],ymm5[14],ymm0[14],ymm5[15],ymm0[15],ymm5[24],ymm0[24],ymm5[25],ymm0[25],ymm5[26],ymm0[26],ymm5[27],ymm0[27],ymm5[28],ymm0[28],ymm5[29],ymm0[29],ymm5[30],ymm0[30],ymm5[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm7, %ymm6, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[4],ymm0[4],ymm5[5],ymm0[5],ymm5[6],ymm0[6],ymm5[7],ymm0[7],ymm5[16],ymm0[16],ymm5[17],ymm0[17],ymm5[18],ymm0[18],ymm5[19],ymm0[19],ymm5[20],ymm0[20],ymm5[21],ymm0[21],ymm5[22],ymm0[22],ymm5[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm5, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm6, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15],ymm4[24],ymm0[24],ymm4[25],ymm0[25],ymm4[26],ymm0[26],ymm4[27],ymm0[27],ymm4[28],ymm0[28],ymm4[29],ymm0[29],ymm4[30],ymm0[30],ymm4[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm6, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[16],ymm0[16],ymm4[17],ymm0[17],ymm4[18],ymm0[18],ymm4[19],ymm0[19],ymm4[20],ymm0[20],ymm4[21],ymm0[21],ymm4[22],ymm0[22],ymm4[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm4, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm5, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm2, %ymm0, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm3, %ymm1, %ymm1
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-LABEL: vec512_i8_signed_mem_reg:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512BW-NEXT:    vpcmpgtb %zmm0, %zmm1, %k1
+; AVX512BW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpminsb %zmm0, %zmm1, %zmm2
+; AVX512BW-NEXT:    vpmaxsb %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    vpsubb %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpsrlw $1, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm3[8],zmm0[8],zmm3[9],zmm0[9],zmm3[10],zmm0[10],zmm3[11],zmm0[11],zmm3[12],zmm0[12],zmm3[13],zmm0[13],zmm3[14],zmm0[14],zmm3[15],zmm0[15],zmm3[24],zmm0[24],zmm3[25],zmm0[25],zmm3[26],zmm0[26],zmm3[27],zmm0[27],zmm3[28],zmm0[28],zmm3[29],zmm0[29],zmm3[30],zmm0[30],zmm3[31],zmm0[31],zmm3[40],zmm0[40],zmm3[41],zmm0[41],zmm3[42],zmm0[42],zmm3[43],zmm0[43],zmm3[44],zmm0[44],zmm3[45],zmm0[45],zmm3[46],zmm0[46],zmm3[47],zmm0[47],zmm3[56],zmm0[56],zmm3[57],zmm0[57],zmm3[58],zmm0[58],zmm3[59],zmm0[59],zmm3[60],zmm0[60],zmm3[61],zmm0[61],zmm3[62],zmm0[62],zmm3[63],zmm0[63]
+; AVX512BW-NEXT:    vpmullw %zmm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512BW-NEXT:    vpandq %zmm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55]
+; AVX512BW-NEXT:    vpmullw %zmm3, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpandq %zmm4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpaddb %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT:    retq
+  %a1 = load <64 x i8>, <64 x i8>* %a1_addr
+  %t3 = icmp sgt <64 x i8> %a1, %a2 ; signed
+  %t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
+  %t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
+  %t7 = sub <64 x i8> %t6, %t5
+  %t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul nsw <64 x i8> %t8, %t4 ; signed
+  %a10 = add nsw <64 x i8> %t9, %a1 ; signed
+  ret <64 x i8> %a10
+}
+
+define <64 x i8> @vec512_i8_signed_reg_mem(<64 x i8> %a1, <64 x i8>* %a2_addr) nounwind {
+; AVX512F-LABEL: vec512_i8_signed_reg_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512F-NEXT:    vpcmpgtb %ymm2, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpcmpgtb %ymm3, %ymm1, %ymm6
+; AVX512F-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512F-NEXT:    vpminsb %ymm2, %ymm0, %ymm6
+; AVX512F-NEXT:    vpminsb %ymm3, %ymm1, %ymm7
+; AVX512F-NEXT:    vpmaxsb %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpsubb %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmaxsb %ymm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpsubb %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpand %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512F-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11],ymm5[12],ymm0[12],ymm5[13],ymm0[13],ymm5[14],ymm0[14],ymm5[15],ymm0[15],ymm5[24],ymm0[24],ymm5[25],ymm0[25],ymm5[26],ymm0[26],ymm5[27],ymm0[27],ymm5[28],ymm0[28],ymm5[29],ymm0[29],ymm5[30],ymm0[30],ymm5[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[4],ymm0[4],ymm5[5],ymm0[5],ymm5[6],ymm0[6],ymm5[7],ymm0[7],ymm5[16],ymm0[16],ymm5[17],ymm0[17],ymm5[18],ymm0[18],ymm5[19],ymm0[19],ymm5[20],ymm0[20],ymm5[21],ymm0[21],ymm5[22],ymm0[22],ymm5[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT:    vpand %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT:    vpackuswb %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15],ymm4[24],ymm0[24],ymm4[25],ymm0[25],ymm4[26],ymm0[26],ymm4[27],ymm0[27],ymm4[28],ymm0[28],ymm4[29],ymm0[29],ymm4[30],ymm0[30],ymm4[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT:    vpand %ymm7, %ymm5, %ymm5
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[16],ymm0[16],ymm4[17],ymm0[17],ymm4[18],ymm0[18],ymm4[19],ymm0[19],ymm4[20],ymm0[20],ymm4[21],ymm0[21],ymm4[22],ymm0[22],ymm4[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT:    vpackuswb %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_reg_mem:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm2
+; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rdi), %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm2, %ymm0, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm3, %ymm1, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm2, %ymm0, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm3, %ymm1, %ymm7
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm2, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm6, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm3, %ymm1, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm7, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm6, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11],ymm5[12],ymm0[12],ymm5[13],ymm0[13],ymm5[14],ymm0[14],ymm5[15],ymm0[15],ymm5[24],ymm0[24],ymm5[25],ymm0[25],ymm5[26],ymm0[26],ymm5[27],ymm0[27],ymm5[28],ymm0[28],ymm5[29],ymm0[29],ymm5[30],ymm0[30],ymm5[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm7, %ymm6, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[4],ymm0[4],ymm5[5],ymm0[5],ymm5[6],ymm0[6],ymm5[7],ymm0[7],ymm5[16],ymm0[16],ymm5[17],ymm0[17],ymm5[18],ymm0[18],ymm5[19],ymm0[19],ymm5[20],ymm0[20],ymm5[21],ymm0[21],ymm5[22],ymm0[22],ymm5[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm5, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm6, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15],ymm4[24],ymm0[24],ymm4[25],ymm0[25],ymm4[26],ymm0[26],ymm4[27],ymm0[27],ymm4[28],ymm0[28],ymm4[29],ymm0[29],ymm4[30],ymm0[30],ymm4[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm6, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[16],ymm0[16],ymm4[17],ymm0[17],ymm4[18],ymm0[18],ymm4[19],ymm0[19],ymm4[20],ymm0[20],ymm4[21],ymm0[21],ymm4[22],ymm0[22],ymm4[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm5, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-LABEL: vec512_i8_signed_reg_mem:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm1
+; AVX512BW-NEXT:    vpcmpgtb %zmm1, %zmm0, %k1
+; AVX512BW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpminsb %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpmaxsb %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsubb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm3[8],zmm0[8],zmm3[9],zmm0[9],zmm3[10],zmm0[10],zmm3[11],zmm0[11],zmm3[12],zmm0[12],zmm3[13],zmm0[13],zmm3[14],zmm0[14],zmm3[15],zmm0[15],zmm3[24],zmm0[24],zmm3[25],zmm0[25],zmm3[26],zmm0[26],zmm3[27],zmm0[27],zmm3[28],zmm0[28],zmm3[29],zmm0[29],zmm3[30],zmm0[30],zmm3[31],zmm0[31],zmm3[40],zmm0[40],zmm3[41],zmm0[41],zmm3[42],zmm0[42],zmm3[43],zmm0[43],zmm3[44],zmm0[44],zmm3[45],zmm0[45],zmm3[46],zmm0[46],zmm3[47],zmm0[47],zmm3[56],zmm0[56],zmm3[57],zmm0[57],zmm3[58],zmm0[58],zmm3[59],zmm0[59],zmm3[60],zmm0[60],zmm3[61],zmm0[61],zmm3[62],zmm0[62],zmm3[63],zmm0[63]
+; AVX512BW-NEXT:    vpmullw %zmm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512BW-NEXT:    vpandq %zmm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55]
+; AVX512BW-NEXT:    vpmullw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpandq %zmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpaddb %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+  %a2 = load <64 x i8>, <64 x i8>* %a2_addr
+  %t3 = icmp sgt <64 x i8> %a1, %a2 ; signed
+  %t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
+  %t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
+  %t7 = sub <64 x i8> %t6, %t5
+  %t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul nsw <64 x i8> %t8, %t4 ; signed
+  %a10 = add nsw <64 x i8> %t9, %a1 ; signed
+  ret <64 x i8> %a10
+}
+
+define <64 x i8> @vec512_i8_signed_mem_mem(<64 x i8>* %a1_addr, <64 x i8>* %a2_addr) nounwind {
+; AVX512F-LABEL: vec512_i8_signed_mem_mem:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX512F-NEXT:    vmovdqa (%rsi), %ymm2
+; AVX512F-NEXT:    vmovdqa 32(%rsi), %ymm3
+; AVX512F-NEXT:    vpcmpgtb %ymm2, %ymm0, %ymm4
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT:    vpcmpgtb %ymm3, %ymm1, %ymm6
+; AVX512F-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512F-NEXT:    vpminsb %ymm2, %ymm0, %ymm6
+; AVX512F-NEXT:    vpminsb %ymm3, %ymm1, %ymm7
+; AVX512F-NEXT:    vpmaxsb %ymm2, %ymm0, %ymm2
+; AVX512F-NEXT:    vpsubb %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT:    vpmaxsb %ymm3, %ymm1, %ymm3
+; AVX512F-NEXT:    vpsubb %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT:    vpand %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512F-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11],ymm5[12],ymm0[12],ymm5[13],ymm0[13],ymm5[14],ymm0[14],ymm5[15],ymm0[15],ymm5[24],ymm0[24],ymm5[25],ymm0[25],ymm5[26],ymm0[26],ymm5[27],ymm0[27],ymm5[28],ymm0[28],ymm5[29],ymm0[29],ymm5[30],ymm0[30],ymm5[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[4],ymm0[4],ymm5[5],ymm0[5],ymm5[6],ymm0[6],ymm5[7],ymm0[7],ymm5[16],ymm0[16],ymm5[17],ymm0[17],ymm5[18],ymm0[18],ymm5[19],ymm0[19],ymm5[20],ymm0[20],ymm5[21],ymm0[21],ymm5[22],ymm0[22],ymm5[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT:    vpand %ymm7, %ymm3, %ymm3
+; AVX512F-NEXT:    vpackuswb %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512F-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15],ymm4[24],ymm0[24],ymm4[25],ymm0[25],ymm4[26],ymm0[26],ymm4[27],ymm0[27],ymm4[28],ymm0[28],ymm4[29],ymm0[29],ymm4[30],ymm0[30],ymm4[31],ymm0[31]
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT:    vpand %ymm7, %ymm5, %ymm5
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512F-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[16],ymm0[16],ymm4[17],ymm0[17],ymm4[18],ymm0[18],ymm4[19],ymm0[19],ymm4[20],ymm0[20],ymm4[21],ymm0[21],ymm4[22],ymm0[22],ymm4[23],ymm0[23]
+; AVX512F-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX512F-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT:    vpackuswb %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-FALLBACK-LABEL: vec512_i8_signed_mem_mem:
+; AVX512VL-FALLBACK:       # %bb.0:
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rdi), %ymm0
+; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rdi), %ymm1
+; AVX512VL-FALLBACK-NEXT:    vmovdqa (%rsi), %ymm2
+; AVX512VL-FALLBACK-NEXT:    vmovdqa 32(%rsi), %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm2, %ymm0, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX512VL-FALLBACK-NEXT:    vpcmpgtb %ymm3, %ymm1, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm2, %ymm0, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpminsb %ymm3, %ymm1, %ymm7
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm2, %ymm0, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm6, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpmaxsb %ymm3, %ymm1, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsubb %ymm7, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm6, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpsrlw $1, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm6, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15],ymm3[24],ymm0[24],ymm3[25],ymm0[25],ymm3[26],ymm0[26],ymm3[27],ymm0[27],ymm3[28],ymm0[28],ymm3[29],ymm0[29],ymm3[30],ymm0[30],ymm3[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm7 = ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11],ymm5[12],ymm0[12],ymm5[13],ymm0[13],ymm5[14],ymm0[14],ymm5[15],ymm0[15],ymm5[24],ymm0[24],ymm5[25],ymm0[25],ymm5[26],ymm0[26],ymm5[27],ymm0[27],ymm5[28],ymm0[28],ymm5[29],ymm0[29],ymm5[30],ymm0[30],ymm5[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm7, %ymm6, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm3 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[16],ymm0[16],ymm3[17],ymm0[17],ymm3[18],ymm0[18],ymm3[19],ymm0[19],ymm3[20],ymm0[20],ymm3[21],ymm0[21],ymm3[22],ymm0[22],ymm3[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm5 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[4],ymm0[4],ymm5[5],ymm0[5],ymm5[6],ymm0[6],ymm5[7],ymm0[7],ymm5[16],ymm0[16],ymm5[17],ymm0[17],ymm5[18],ymm0[18],ymm5[19],ymm0[19],ymm5[20],ymm0[20],ymm5[21],ymm0[21],ymm5[22],ymm0[22],ymm5[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm5, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm6, %ymm3, %ymm3
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm5 = ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15],ymm2[24],ymm0[24],ymm2[25],ymm0[25],ymm2[26],ymm0[26],ymm2[27],ymm0[27],ymm2[28],ymm0[28],ymm2[29],ymm0[29],ymm2[30],ymm0[30],ymm2[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} ymm6 = ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15],ymm4[24],ymm0[24],ymm4[25],ymm0[25],ymm4[26],ymm0[26],ymm4[27],ymm0[27],ymm4[28],ymm0[28],ymm4[29],ymm0[29],ymm4[30],ymm0[30],ymm4[31],ymm0[31]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm6, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm5, %ymm5
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm2 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[16],ymm0[16],ymm2[17],ymm0[17],ymm2[18],ymm0[18],ymm2[19],ymm0[19],ymm2[20],ymm0[20],ymm2[21],ymm0[21],ymm2[22],ymm0[22],ymm2[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[16],ymm0[16],ymm4[17],ymm0[17],ymm4[18],ymm0[18],ymm4[19],ymm0[19],ymm4[20],ymm0[20],ymm4[21],ymm0[21],ymm4[22],ymm0[22],ymm4[23],ymm0[23]
+; AVX512VL-FALLBACK-NEXT:    vpmullw %ymm4, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpand %ymm7, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpackuswb %ymm5, %ymm2, %ymm2
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
+; AVX512VL-FALLBACK-NEXT:    vpaddb %ymm1, %ymm3, %ymm1
+; AVX512VL-FALLBACK-NEXT:    retq
+;
+; AVX512BW-LABEL: vec512_i8_signed_mem_mem:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vmovdqa64 (%rdi), %zmm0
+; AVX512BW-NEXT:    vmovdqa64 (%rsi), %zmm1
+; AVX512BW-NEXT:    vpcmpgtb %zmm1, %zmm0, %k1
+; AVX512BW-NEXT:    vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512BW-NEXT:    vmovdqu8 %zmm2, %zmm3 {%k1}
+; AVX512BW-NEXT:    vpminsb %zmm1, %zmm0, %zmm2
+; AVX512BW-NEXT:    vpmaxsb %zmm1, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsubb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpsrlw $1, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
+; AVX512BW-NEXT:    vpunpckhbw {{.*#+}} zmm4 = zmm3[8],zmm0[8],zmm3[9],zmm0[9],zmm3[10],zmm0[10],zmm3[11],zmm0[11],zmm3[12],zmm0[12],zmm3[13],zmm0[13],zmm3[14],zmm0[14],zmm3[15],zmm0[15],zmm3[24],zmm0[24],zmm3[25],zmm0[25],zmm3[26],zmm0[26],zmm3[27],zmm0[27],zmm3[28],zmm0[28],zmm3[29],zmm0[29],zmm3[30],zmm0[30],zmm3[31],zmm0[31],zmm3[40],zmm0[40],zmm3[41],zmm0[41],zmm3[42],zmm0[42],zmm3[43],zmm0[43],zmm3[44],zmm0[44],zmm3[45],zmm0[45],zmm3[46],zmm0[46],zmm3[47],zmm0[47],zmm3[56],zmm0[56],zmm3[57],zmm0[57],zmm3[58],zmm0[58],zmm3[59],zmm0[59],zmm3[60],zmm0[60],zmm3[61],zmm0[61],zmm3[62],zmm0[62],zmm3[63],zmm0[63]
+; AVX512BW-NEXT:    vpmullw %zmm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX512BW-NEXT:    vpandq %zmm4, %zmm2, %zmm2
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
+; AVX512BW-NEXT:    vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55]
+; AVX512BW-NEXT:    vpmullw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpandq %zmm4, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpackuswb %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT:    vpaddb %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+  %a1 = load <64 x i8>, <64 x i8>* %a1_addr
+  %a2 = load <64 x i8>, <64 x i8>* %a2_addr
+  %t3 = icmp sgt <64 x i8> %a1, %a2 ; signed
+  %t4 = select <64 x i1> %t3, <64 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <64 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t5 = select <64 x i1> %t3, <64 x i8> %a2, <64 x i8> %a1
+  %t6 = select <64 x i1> %t3, <64 x i8> %a1, <64 x i8> %a2
+  %t7 = sub <64 x i8> %t6, %t5
+  %t8 = lshr <64 x i8> %t7, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  %t9 = mul nsw <64 x i8> %t8, %t4 ; signed
+  %a10 = add nsw <64 x i8> %t9, %a1 ; signed
+  ret <64 x i8> %a10
+}

Added: llvm/trunk/test/CodeGen/X86/midpoint-int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/midpoint-int.ll?rev=355436&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/midpoint-int.ll (added)
+++ llvm/trunk/test/CodeGen/X86/midpoint-int.ll Tue Mar  5 12:18:47 2019
@@ -0,0 +1,1314 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=ALL,X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=ALL,X32
+
+; These test cases are inspired by C++2a std::midpoint().
+; See https://bugs.llvm.org/show_bug.cgi?id=40965
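+;
+; A rough C++ sketch of the pattern under test (illustrative only; the
+; function and variable names here are hypothetical, not from the bug):
+;
+;   int midpoint_like(int a1, int a2) {
+;     int sign = a1 > a2 ? -1 : 1;            // %t4: walk direction
+;     int lo   = a1 > a2 ? a2 : a1;           // %t5: smaller value
+;     int hi   = a1 > a2 ? a1 : a2;           // %t6: larger value
+;     unsigned half = unsigned(hi - lo) / 2;  // %t7/%t8: sub, lshr by 1
+;     return a1 + sign * int(half);           // %t9/%a10: mul, add
+;   }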
+
+; ---------------------------------------------------------------------------- ;
+; 32-bit width
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define i32 @scalar_i32_signed_reg_reg(i32 %a1, i32 %a2) nounwind {
+; X64-LABEL: scalar_i32_signed_reg_reg:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpl %esi, %edi
+; X64-NEXT:    setle %al
+; X64-NEXT:    leal -1(%rax,%rax), %eax
+; X64-NEXT:    movl %edi, %ecx
+; X64-NEXT:    cmovgl %esi, %ecx
+; X64-NEXT:    cmovgel %edi, %esi
+; X64-NEXT:    subl %ecx, %esi
+; X64-NEXT:    shrl %esi
+; X64-NEXT:    imull %esi, %eax
+; X64-NEXT:    addl %edi, %eax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i32_signed_reg_reg:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    xorl %eax, %eax
+; X32-NEXT:    cmpl %edx, %ecx
+; X32-NEXT:    setle %al
+; X32-NEXT:    movl %edx, %esi
+; X32-NEXT:    jg .LBB0_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %ecx, %esi
+; X32-NEXT:  .LBB0_2:
+; X32-NEXT:    leal -1(%eax,%eax), %edi
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    jge .LBB0_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:  .LBB0_4:
+; X32-NEXT:    subl %esi, %eax
+; X32-NEXT:    shrl %eax
+; X32-NEXT:    imull %edi, %eax
+; X32-NEXT:    addl %ecx, %eax
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl
+  %t3 = icmp sgt i32 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i32 -1, i32 1
+  %t5 = select i1 %t3, i32 %a2, i32 %a1
+  %t6 = select i1 %t3, i32 %a1, i32 %a2
+  %t7 = sub i32 %t6, %t5
+  %t8 = lshr i32 %t7, 1
+  %t9 = mul nsw i32 %t8, %t4 ; signed
+  %a10 = add nsw i32 %t9, %a1 ; signed
+  ret i32 %a10
+}
+
+define i32 @scalar_i32_unsigned_reg_reg(i32 %a1, i32 %a2) nounwind {
+; X64-LABEL: scalar_i32_unsigned_reg_reg:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpl %esi, %edi
+; X64-NEXT:    setbe %al
+; X64-NEXT:    leal -1(%rax,%rax), %eax
+; X64-NEXT:    movl %edi, %ecx
+; X64-NEXT:    cmoval %esi, %ecx
+; X64-NEXT:    cmoval %edi, %esi
+; X64-NEXT:    subl %ecx, %esi
+; X64-NEXT:    shrl %esi
+; X64-NEXT:    imull %esi, %eax
+; X64-NEXT:    addl %edi, %eax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i32_unsigned_reg_reg:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:    cmpl %eax, %ecx
+; X32-NEXT:    setbe %dl
+; X32-NEXT:    leal -1(%edx,%edx), %edx
+; X32-NEXT:    ja .LBB1_1
+; X32-NEXT:  # %bb.2:
+; X32-NEXT:    movl %ecx, %esi
+; X32-NEXT:    jmp .LBB1_3
+; X32-NEXT:  .LBB1_1:
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:  .LBB1_3:
+; X32-NEXT:    subl %esi, %eax
+; X32-NEXT:    shrl %eax
+; X32-NEXT:    imull %edx, %eax
+; X32-NEXT:    addl %ecx, %eax
+; X32-NEXT:    popl %esi
+; X32-NEXT:    retl
+  %t3 = icmp ugt i32 %a1, %a2
+  %t4 = select i1 %t3, i32 -1, i32 1
+  %t5 = select i1 %t3, i32 %a2, i32 %a1
+  %t6 = select i1 %t3, i32 %a1, i32 %a2
+  %t7 = sub i32 %t6, %t5
+  %t8 = lshr i32 %t7, 1
+  %t9 = mul i32 %t8, %t4
+  %a10 = add i32 %t9, %a1
+  ret i32 %a10
+}
+
+; Values are loaded. Only check signed case.
+
+define i32 @scalar_i32_signed_mem_reg(i32* %a1_addr, i32 %a2) nounwind {
+; X64-LABEL: scalar_i32_signed_mem_reg:
+; X64:       # %bb.0:
+; X64-NEXT:    movl (%rdi), %ecx
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpl %esi, %ecx
+; X64-NEXT:    setle %al
+; X64-NEXT:    leal -1(%rax,%rax), %eax
+; X64-NEXT:    movl %ecx, %edx
+; X64-NEXT:    cmovgl %esi, %edx
+; X64-NEXT:    cmovgel %ecx, %esi
+; X64-NEXT:    subl %edx, %esi
+; X64-NEXT:    shrl %esi
+; X64-NEXT:    imull %esi, %eax
+; X64-NEXT:    addl %ecx, %eax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i32_signed_mem_reg:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl (%eax), %ecx
+; X32-NEXT:    xorl %eax, %eax
+; X32-NEXT:    cmpl %edx, %ecx
+; X32-NEXT:    setle %al
+; X32-NEXT:    movl %edx, %esi
+; X32-NEXT:    jg .LBB2_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %ecx, %esi
+; X32-NEXT:  .LBB2_2:
+; X32-NEXT:    leal -1(%eax,%eax), %edi
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    jge .LBB2_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:  .LBB2_4:
+; X32-NEXT:    subl %esi, %eax
+; X32-NEXT:    shrl %eax
+; X32-NEXT:    imull %edi, %eax
+; X32-NEXT:    addl %ecx, %eax
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl
+  %a1 = load i32, i32* %a1_addr
+  %t3 = icmp sgt i32 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i32 -1, i32 1
+  %t5 = select i1 %t3, i32 %a2, i32 %a1
+  %t6 = select i1 %t3, i32 %a1, i32 %a2
+  %t7 = sub i32 %t6, %t5
+  %t8 = lshr i32 %t7, 1
+  %t9 = mul nsw i32 %t8, %t4 ; signed
+  %a10 = add nsw i32 %t9, %a1 ; signed
+  ret i32 %a10
+}
+
+define i32 @scalar_i32_signed_reg_mem(i32 %a1, i32* %a2_addr) nounwind {
+; X64-LABEL: scalar_i32_signed_reg_mem:
+; X64:       # %bb.0:
+; X64-NEXT:    movl (%rsi), %eax
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpl %eax, %edi
+; X64-NEXT:    setle %cl
+; X64-NEXT:    leal -1(%rcx,%rcx), %ecx
+; X64-NEXT:    movl %edi, %edx
+; X64-NEXT:    cmovgl %eax, %edx
+; X64-NEXT:    cmovgel %edi, %eax
+; X64-NEXT:    subl %edx, %eax
+; X64-NEXT:    shrl %eax
+; X64-NEXT:    imull %ecx, %eax
+; X64-NEXT:    addl %edi, %eax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i32_signed_reg_mem:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl (%eax), %edx
+; X32-NEXT:    xorl %eax, %eax
+; X32-NEXT:    cmpl %edx, %ecx
+; X32-NEXT:    setle %al
+; X32-NEXT:    movl %edx, %esi
+; X32-NEXT:    jg .LBB3_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %ecx, %esi
+; X32-NEXT:  .LBB3_2:
+; X32-NEXT:    leal -1(%eax,%eax), %edi
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    jge .LBB3_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:  .LBB3_4:
+; X32-NEXT:    subl %esi, %eax
+; X32-NEXT:    shrl %eax
+; X32-NEXT:    imull %edi, %eax
+; X32-NEXT:    addl %ecx, %eax
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl
+  %a2 = load i32, i32* %a2_addr
+  %t3 = icmp sgt i32 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i32 -1, i32 1
+  %t5 = select i1 %t3, i32 %a2, i32 %a1
+  %t6 = select i1 %t3, i32 %a1, i32 %a2
+  %t7 = sub i32 %t6, %t5
+  %t8 = lshr i32 %t7, 1
+  %t9 = mul nsw i32 %t8, %t4 ; signed
+  %a10 = add nsw i32 %t9, %a1 ; signed
+  ret i32 %a10
+}
+
+define i32 @scalar_i32_signed_mem_mem(i32* %a1_addr, i32* %a2_addr) nounwind {
+; X64-LABEL: scalar_i32_signed_mem_mem:
+; X64:       # %bb.0:
+; X64-NEXT:    movl (%rdi), %ecx
+; X64-NEXT:    movl (%rsi), %eax
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpl %eax, %ecx
+; X64-NEXT:    setle %dl
+; X64-NEXT:    leal -1(%rdx,%rdx), %edx
+; X64-NEXT:    movl %ecx, %esi
+; X64-NEXT:    cmovgl %eax, %esi
+; X64-NEXT:    cmovgel %ecx, %eax
+; X64-NEXT:    subl %esi, %eax
+; X64-NEXT:    shrl %eax
+; X64-NEXT:    imull %edx, %eax
+; X64-NEXT:    addl %ecx, %eax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i32_signed_mem_mem:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl (%ecx), %ecx
+; X32-NEXT:    movl (%eax), %edx
+; X32-NEXT:    xorl %eax, %eax
+; X32-NEXT:    cmpl %edx, %ecx
+; X32-NEXT:    setle %al
+; X32-NEXT:    movl %edx, %esi
+; X32-NEXT:    jg .LBB4_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %ecx, %esi
+; X32-NEXT:  .LBB4_2:
+; X32-NEXT:    leal -1(%eax,%eax), %edi
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    jge .LBB4_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:  .LBB4_4:
+; X32-NEXT:    subl %esi, %eax
+; X32-NEXT:    shrl %eax
+; X32-NEXT:    imull %edi, %eax
+; X32-NEXT:    addl %ecx, %eax
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl
+  %a1 = load i32, i32* %a1_addr
+  %a2 = load i32, i32* %a2_addr
+  %t3 = icmp sgt i32 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i32 -1, i32 1
+  %t5 = select i1 %t3, i32 %a2, i32 %a1
+  %t6 = select i1 %t3, i32 %a1, i32 %a2
+  %t7 = sub i32 %t6, %t5
+  %t8 = lshr i32 %t7, 1
+  %t9 = mul nsw i32 %t8, %t4 ; signed
+  %a10 = add nsw i32 %t9, %a1 ; signed
+  ret i32 %a10
+}
+
+; ---------------------------------------------------------------------------- ;
+; 64-bit width
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define i64 @scalar_i64_signed_reg_reg(i64 %a1, i64 %a2) nounwind {
+; X64-LABEL: scalar_i64_signed_reg_reg:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpq %rsi, %rdi
+; X64-NEXT:    setle %al
+; X64-NEXT:    leaq -1(%rax,%rax), %rax
+; X64-NEXT:    movq %rdi, %rcx
+; X64-NEXT:    cmovgq %rsi, %rcx
+; X64-NEXT:    cmovgeq %rdi, %rsi
+; X64-NEXT:    subq %rcx, %rsi
+; X64-NEXT:    shrq %rsi
+; X64-NEXT:    imulq %rsi, %rax
+; X64-NEXT:    addq %rdi, %rax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i64_signed_reg_reg:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebp
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    pushl %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X32-NEXT:    cmpl %esi, %edx
+; X32-NEXT:    movl %ebp, %eax
+; X32-NEXT:    sbbl %ecx, %eax
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:    movl $-1, %edi
+; X32-NEXT:    movl $-1, %ebx
+; X32-NEXT:    jl .LBB5_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    xorl %ebx, %ebx
+; X32-NEXT:    movl $1, %edi
+; X32-NEXT:    movl %ecx, %ebp
+; X32-NEXT:    movl %esi, %edx
+; X32-NEXT:  .LBB5_2:
+; X32-NEXT:    movl %edi, (%esp) # 4-byte Spill
+; X32-NEXT:    cmpl %eax, %esi
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT:    sbbl %edi, %eax
+; X32-NEXT:    movl %esi, %eax
+; X32-NEXT:    jge .LBB5_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movl %edi, %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:  .LBB5_4:
+; X32-NEXT:    subl %edx, %eax
+; X32-NEXT:    sbbl %ebp, %ecx
+; X32-NEXT:    shrdl $1, %ecx, %eax
+; X32-NEXT:    imull %eax, %ebx
+; X32-NEXT:    movl (%esp), %esi # 4-byte Reload
+; X32-NEXT:    mull %esi
+; X32-NEXT:    addl %ebx, %edx
+; X32-NEXT:    shrl %ecx
+; X32-NEXT:    imull %esi, %ecx
+; X32-NEXT:    addl %ecx, %edx
+; X32-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    addl $4, %esp
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    popl %ebp
+; X32-NEXT:    retl
+  %t3 = icmp sgt i64 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i64 -1, i64 1
+  %t5 = select i1 %t3, i64 %a2, i64 %a1
+  %t6 = select i1 %t3, i64 %a1, i64 %a2
+  %t7 = sub i64 %t6, %t5
+  %t8 = lshr i64 %t7, 1
+  %t9 = mul nsw i64 %t8, %t4 ; signed
+  %a10 = add nsw i64 %t9, %a1 ; signed
+  ret i64 %a10
+}
+
+define i64 @scalar_i64_unsigned_reg_reg(i64 %a1, i64 %a2) nounwind {
+; X64-LABEL: scalar_i64_unsigned_reg_reg:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpq %rsi, %rdi
+; X64-NEXT:    setbe %al
+; X64-NEXT:    leaq -1(%rax,%rax), %rax
+; X64-NEXT:    movq %rdi, %rcx
+; X64-NEXT:    cmovaq %rsi, %rcx
+; X64-NEXT:    cmovaq %rdi, %rsi
+; X64-NEXT:    subq %rcx, %rsi
+; X64-NEXT:    shrq %rsi
+; X64-NEXT:    imulq %rsi, %rax
+; X64-NEXT:    addq %rdi, %rax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i64_unsigned_reg_reg:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebp
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT:    cmpl %ecx, %eax
+; X32-NEXT:    movl %edi, %edx
+; X32-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    movl $-1, %ebx
+; X32-NEXT:    jb .LBB6_1
+; X32-NEXT:  # %bb.2:
+; X32-NEXT:    xorl %ebp, %ebp
+; X32-NEXT:    movl $1, %ebx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    movl %ecx, %esi
+; X32-NEXT:    jmp .LBB6_3
+; X32-NEXT:  .LBB6_1:
+; X32-NEXT:    movl $-1, %ebp
+; X32-NEXT:    movl %edi, %edx
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:  .LBB6_3:
+; X32-NEXT:    subl %esi, %eax
+; X32-NEXT:    sbbl %edx, %edi
+; X32-NEXT:    shrdl $1, %edi, %eax
+; X32-NEXT:    imull %eax, %ebp
+; X32-NEXT:    mull %ebx
+; X32-NEXT:    addl %ebp, %edx
+; X32-NEXT:    shrl %edi
+; X32-NEXT:    imull %ebx, %edi
+; X32-NEXT:    addl %edi, %edx
+; X32-NEXT:    addl %ecx, %eax
+; X32-NEXT:    adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    popl %ebp
+; X32-NEXT:    retl
+  %t3 = icmp ugt i64 %a1, %a2
+  %t4 = select i1 %t3, i64 -1, i64 1
+  %t5 = select i1 %t3, i64 %a2, i64 %a1
+  %t6 = select i1 %t3, i64 %a1, i64 %a2
+  %t7 = sub i64 %t6, %t5
+  %t8 = lshr i64 %t7, 1
+  %t9 = mul i64 %t8, %t4
+  %a10 = add i64 %t9, %a1
+  ret i64 %a10
+}
+
+; Values are loaded. Only check signed case.
+
+define i64 @scalar_i64_signed_mem_reg(i64* %a1_addr, i64 %a2) nounwind {
+; X64-LABEL: scalar_i64_signed_mem_reg:
+; X64:       # %bb.0:
+; X64-NEXT:    movq (%rdi), %rcx
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpq %rsi, %rcx
+; X64-NEXT:    setle %al
+; X64-NEXT:    leaq -1(%rax,%rax), %rax
+; X64-NEXT:    movq %rcx, %rdx
+; X64-NEXT:    cmovgq %rsi, %rdx
+; X64-NEXT:    cmovgeq %rcx, %rsi
+; X64-NEXT:    subq %rdx, %rsi
+; X64-NEXT:    shrq %rsi
+; X64-NEXT:    imulq %rsi, %rax
+; X64-NEXT:    addq %rcx, %rax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i64_signed_mem_reg:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebp
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    pushl %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl (%eax), %esi
+; X32-NEXT:    movl 4(%eax), %ebp
+; X32-NEXT:    cmpl %esi, %ecx
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:    sbbl %ebp, %eax
+; X32-NEXT:    movl $-1, %eax
+; X32-NEXT:    movl $-1, %ebx
+; X32-NEXT:    movl %ecx, %edi
+; X32-NEXT:    jl .LBB7_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    xorl %ebx, %ebx
+; X32-NEXT:    movl $1, %eax
+; X32-NEXT:    movl %ebp, %edx
+; X32-NEXT:    movl %esi, %edi
+; X32-NEXT:  .LBB7_2:
+; X32-NEXT:    movl %eax, (%esp) # 4-byte Spill
+; X32-NEXT:    cmpl %ecx, %esi
+; X32-NEXT:    movl %ebp, %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    sbbl %ecx, %eax
+; X32-NEXT:    movl %ebp, %ecx
+; X32-NEXT:    movl %esi, %eax
+; X32-NEXT:    jge .LBB7_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:  .LBB7_4:
+; X32-NEXT:    subl %edi, %eax
+; X32-NEXT:    sbbl %edx, %ecx
+; X32-NEXT:    shrdl $1, %ecx, %eax
+; X32-NEXT:    imull %eax, %ebx
+; X32-NEXT:    movl (%esp), %edi # 4-byte Reload
+; X32-NEXT:    mull %edi
+; X32-NEXT:    addl %ebx, %edx
+; X32-NEXT:    shrl %ecx
+; X32-NEXT:    imull %edi, %ecx
+; X32-NEXT:    addl %ecx, %edx
+; X32-NEXT:    addl %esi, %eax
+; X32-NEXT:    adcl %ebp, %edx
+; X32-NEXT:    addl $4, %esp
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    popl %ebp
+; X32-NEXT:    retl
+  %a1 = load i64, i64* %a1_addr
+  %t3 = icmp sgt i64 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i64 -1, i64 1
+  %t5 = select i1 %t3, i64 %a2, i64 %a1
+  %t6 = select i1 %t3, i64 %a1, i64 %a2
+  %t7 = sub i64 %t6, %t5
+  %t8 = lshr i64 %t7, 1
+  %t9 = mul nsw i64 %t8, %t4 ; signed
+  %a10 = add nsw i64 %t9, %a1 ; signed
+  ret i64 %a10
+}
+
+define i64 @scalar_i64_signed_reg_mem(i64 %a1, i64* %a2_addr) nounwind {
+; X64-LABEL: scalar_i64_signed_reg_mem:
+; X64:       # %bb.0:
+; X64-NEXT:    movq (%rsi), %rax
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpq %rax, %rdi
+; X64-NEXT:    setle %cl
+; X64-NEXT:    leaq -1(%rcx,%rcx), %rcx
+; X64-NEXT:    movq %rdi, %rdx
+; X64-NEXT:    cmovgq %rax, %rdx
+; X64-NEXT:    cmovgeq %rdi, %rax
+; X64-NEXT:    subq %rdx, %rax
+; X64-NEXT:    shrq %rax
+; X64-NEXT:    imulq %rcx, %rax
+; X64-NEXT:    addq %rdi, %rax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i64_signed_reg_mem:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebp
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    subl $8, %esp
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl (%eax), %edx
+; X32-NEXT:    movl 4(%eax), %ebp
+; X32-NEXT:    cmpl %esi, %edx
+; X32-NEXT:    movl %ebp, %eax
+; X32-NEXT:    sbbl %ecx, %eax
+; X32-NEXT:    movl $-1, %eax
+; X32-NEXT:    movl $-1, %ebx
+; X32-NEXT:    movl %ebp, (%esp) # 4-byte Spill
+; X32-NEXT:    movl %edx, %edi
+; X32-NEXT:    jl .LBB8_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    xorl %ebx, %ebx
+; X32-NEXT:    movl $1, %eax
+; X32-NEXT:    movl %ecx, (%esp) # 4-byte Spill
+; X32-NEXT:    movl %esi, %edi
+; X32-NEXT:  .LBB8_2:
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    cmpl %edx, %esi
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    sbbl %ebp, %eax
+; X32-NEXT:    jge .LBB8_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movl %ebp, %ecx
+; X32-NEXT:    movl %edx, %esi
+; X32-NEXT:  .LBB8_4:
+; X32-NEXT:    subl %edi, %esi
+; X32-NEXT:    sbbl (%esp), %ecx # 4-byte Folded Reload
+; X32-NEXT:    shrdl $1, %ecx, %esi
+; X32-NEXT:    imull %esi, %ebx
+; X32-NEXT:    movl %esi, %eax
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X32-NEXT:    mull %esi
+; X32-NEXT:    addl %ebx, %edx
+; X32-NEXT:    shrl %ecx
+; X32-NEXT:    imull %esi, %ecx
+; X32-NEXT:    addl %ecx, %edx
+; X32-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    addl $8, %esp
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    popl %ebp
+; X32-NEXT:    retl
+  %a2 = load i64, i64* %a2_addr
+  %t3 = icmp sgt i64 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i64 -1, i64 1
+  %t5 = select i1 %t3, i64 %a2, i64 %a1
+  %t6 = select i1 %t3, i64 %a1, i64 %a2
+  %t7 = sub i64 %t6, %t5
+  %t8 = lshr i64 %t7, 1
+  %t9 = mul nsw i64 %t8, %t4 ; signed
+  %a10 = add nsw i64 %t9, %a1 ; signed
+  ret i64 %a10
+}
+
+define i64 @scalar_i64_signed_mem_mem(i64* %a1_addr, i64* %a2_addr) nounwind {
+; X64-LABEL: scalar_i64_signed_mem_mem:
+; X64:       # %bb.0:
+; X64-NEXT:    movq (%rdi), %rcx
+; X64-NEXT:    movq (%rsi), %rax
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpq %rax, %rcx
+; X64-NEXT:    setle %dl
+; X64-NEXT:    leaq -1(%rdx,%rdx), %rdx
+; X64-NEXT:    movq %rcx, %rsi
+; X64-NEXT:    cmovgq %rax, %rsi
+; X64-NEXT:    cmovgeq %rcx, %rax
+; X64-NEXT:    subq %rsi, %rax
+; X64-NEXT:    shrq %rax
+; X64-NEXT:    imulq %rdx, %rax
+; X64-NEXT:    addq %rcx, %rax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i64_signed_mem_mem:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %ebp
+; X32-NEXT:    pushl %ebx
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    subl $12, %esp
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl (%ecx), %esi
+; X32-NEXT:    movl 4(%ecx), %edi
+; X32-NEXT:    movl (%eax), %edx
+; X32-NEXT:    movl 4(%eax), %ebp
+; X32-NEXT:    cmpl %esi, %edx
+; X32-NEXT:    movl %ebp, %eax
+; X32-NEXT:    sbbl %edi, %eax
+; X32-NEXT:    movl $-1, %eax
+; X32-NEXT:    movl $-1, %ebx
+; X32-NEXT:    movl %ebp, %ecx
+; X32-NEXT:    movl %edx, (%esp) # 4-byte Spill
+; X32-NEXT:    jl .LBB9_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    xorl %ebx, %ebx
+; X32-NEXT:    movl $1, %eax
+; X32-NEXT:    movl %edi, %ecx
+; X32-NEXT:    movl %esi, (%esp) # 4-byte Spill
+; X32-NEXT:  .LBB9_2:
+; X32-NEXT:    movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X32-NEXT:    cmpl %edx, %esi
+; X32-NEXT:    movl %edi, %eax
+; X32-NEXT:    sbbl %ebp, %eax
+; X32-NEXT:    movl %edi, %ecx
+; X32-NEXT:    movl %esi, %eax
+; X32-NEXT:    jge .LBB9_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movl %ebp, %ecx
+; X32-NEXT:    movl %edx, %eax
+; X32-NEXT:  .LBB9_4:
+; X32-NEXT:    subl (%esp), %eax # 4-byte Folded Reload
+; X32-NEXT:    sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X32-NEXT:    shrdl $1, %ecx, %eax
+; X32-NEXT:    imull %eax, %ebx
+; X32-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X32-NEXT:    mull %ebp
+; X32-NEXT:    addl %ebx, %edx
+; X32-NEXT:    shrl %ecx
+; X32-NEXT:    imull %ebp, %ecx
+; X32-NEXT:    addl %ecx, %edx
+; X32-NEXT:    addl %esi, %eax
+; X32-NEXT:    adcl %edi, %edx
+; X32-NEXT:    addl $12, %esp
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    popl %ebx
+; X32-NEXT:    popl %ebp
+; X32-NEXT:    retl
+  %a1 = load i64, i64* %a1_addr
+  %a2 = load i64, i64* %a2_addr
+  %t3 = icmp sgt i64 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i64 -1, i64 1
+  %t5 = select i1 %t3, i64 %a2, i64 %a1
+  %t6 = select i1 %t3, i64 %a1, i64 %a2
+  %t7 = sub i64 %t6, %t5
+  %t8 = lshr i64 %t7, 1
+  %t9 = mul nsw i64 %t8, %t4 ; signed
+  %a10 = add nsw i64 %t9, %a1 ; signed
+  ret i64 %a10
+}
+
+; ---------------------------------------------------------------------------- ;
+; 16-bit width
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind {
+; X64-LABEL: scalar_i16_signed_reg_reg:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpw %si, %di
+; X64-NEXT:    setle %al
+; X64-NEXT:    leal -1(%rax,%rax), %ecx
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    cmovgl %esi, %eax
+; X64-NEXT:    cmovgel %edi, %esi
+; X64-NEXT:    subl %eax, %esi
+; X64-NEXT:    movzwl %si, %eax
+; X64-NEXT:    shrl %eax
+; X64-NEXT:    imull %ecx, %eax
+; X64-NEXT:    addl %edi, %eax
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i16_signed_reg_reg:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:    cmpw %ax, %cx
+; X32-NEXT:    setle %dl
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    jg .LBB10_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %ecx, %esi
+; X32-NEXT:  .LBB10_2:
+; X32-NEXT:    leal -1(%edx,%edx), %edx
+; X32-NEXT:    movl %ecx, %edi
+; X32-NEXT:    jge .LBB10_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movl %eax, %edi
+; X32-NEXT:  .LBB10_4:
+; X32-NEXT:    subl %esi, %edi
+; X32-NEXT:    movzwl %di, %eax
+; X32-NEXT:    shrl %eax
+; X32-NEXT:    imull %edx, %eax
+; X32-NEXT:    addl %ecx, %eax
+; X32-NEXT:    # kill: def $ax killed $ax killed $eax
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl
+  %t3 = icmp sgt i16 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i16 -1, i16 1
+  %t5 = select i1 %t3, i16 %a2, i16 %a1
+  %t6 = select i1 %t3, i16 %a1, i16 %a2
+  %t7 = sub i16 %t6, %t5
+  %t8 = lshr i16 %t7, 1
+  %t9 = mul nsw i16 %t8, %t4 ; signed
+  %a10 = add nsw i16 %t9, %a1 ; signed
+  ret i16 %a10
+}
+
+define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind {
+; X64-LABEL: scalar_i16_unsigned_reg_reg:
+; X64:       # %bb.0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpw %si, %di
+; X64-NEXT:    setbe %al
+; X64-NEXT:    leal -1(%rax,%rax), %ecx
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    cmoval %esi, %eax
+; X64-NEXT:    cmoval %edi, %esi
+; X64-NEXT:    subl %eax, %esi
+; X64-NEXT:    movzwl %si, %eax
+; X64-NEXT:    shrl %eax
+; X64-NEXT:    imull %ecx, %eax
+; X64-NEXT:    addl %edi, %eax
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i16_unsigned_reg_reg:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:    cmpw %ax, %cx
+; X32-NEXT:    setbe %dl
+; X32-NEXT:    leal -1(%edx,%edx), %edx
+; X32-NEXT:    ja .LBB11_1
+; X32-NEXT:  # %bb.2:
+; X32-NEXT:    movl %ecx, %esi
+; X32-NEXT:    jmp .LBB11_3
+; X32-NEXT:  .LBB11_1:
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:  .LBB11_3:
+; X32-NEXT:    subl %esi, %eax
+; X32-NEXT:    movzwl %ax, %eax
+; X32-NEXT:    shrl %eax
+; X32-NEXT:    imull %edx, %eax
+; X32-NEXT:    addl %ecx, %eax
+; X32-NEXT:    # kill: def $ax killed $ax killed $eax
+; X32-NEXT:    popl %esi
+; X32-NEXT:    retl
+  %t3 = icmp ugt i16 %a1, %a2
+  %t4 = select i1 %t3, i16 -1, i16 1
+  %t5 = select i1 %t3, i16 %a2, i16 %a1
+  %t6 = select i1 %t3, i16 %a1, i16 %a2
+  %t7 = sub i16 %t6, %t5
+  %t8 = lshr i16 %t7, 1
+  %t9 = mul i16 %t8, %t4
+  %a10 = add i16 %t9, %a1
+  ret i16 %a10
+}
+
+; Values are loaded. Only check signed case.
+
+define i16 @scalar_i16_signed_mem_reg(i16* %a1_addr, i16 %a2) nounwind {
+; X64-LABEL: scalar_i16_signed_mem_reg:
+; X64:       # %bb.0:
+; X64-NEXT:    movzwl (%rdi), %ecx
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    cmpw %si, %cx
+; X64-NEXT:    setle %al
+; X64-NEXT:    leal -1(%rax,%rax), %edx
+; X64-NEXT:    movl %ecx, %eax
+; X64-NEXT:    cmovgl %esi, %eax
+; X64-NEXT:    cmovgel %ecx, %esi
+; X64-NEXT:    subl %eax, %esi
+; X64-NEXT:    movzwl %si, %eax
+; X64-NEXT:    shrl %eax
+; X64-NEXT:    imull %edx, %eax
+; X64-NEXT:    addl %ecx, %eax
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i16_signed_mem_reg:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movzwl (%ecx), %ecx
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:    cmpw %ax, %cx
+; X32-NEXT:    setle %dl
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    jg .LBB12_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %ecx, %esi
+; X32-NEXT:  .LBB12_2:
+; X32-NEXT:    leal -1(%edx,%edx), %edx
+; X32-NEXT:    movl %ecx, %edi
+; X32-NEXT:    jge .LBB12_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movl %eax, %edi
+; X32-NEXT:  .LBB12_4:
+; X32-NEXT:    subl %esi, %edi
+; X32-NEXT:    movzwl %di, %eax
+; X32-NEXT:    shrl %eax
+; X32-NEXT:    imull %edx, %eax
+; X32-NEXT:    addl %ecx, %eax
+; X32-NEXT:    # kill: def $ax killed $ax killed $eax
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl
+  %a1 = load i16, i16* %a1_addr
+  %t3 = icmp sgt i16 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i16 -1, i16 1
+  %t5 = select i1 %t3, i16 %a2, i16 %a1
+  %t6 = select i1 %t3, i16 %a1, i16 %a2
+  %t7 = sub i16 %t6, %t5
+  %t8 = lshr i16 %t7, 1
+  %t9 = mul nsw i16 %t8, %t4 ; signed
+  %a10 = add nsw i16 %t9, %a1 ; signed
+  ret i16 %a10
+}
+
+define i16 @scalar_i16_signed_reg_mem(i16 %a1, i16* %a2_addr) nounwind {
+; X64-LABEL: scalar_i16_signed_reg_mem:
+; X64:       # %bb.0:
+; X64-NEXT:    movzwl (%rsi), %eax
+; X64-NEXT:    xorl %ecx, %ecx
+; X64-NEXT:    cmpw %ax, %di
+; X64-NEXT:    setle %cl
+; X64-NEXT:    leal -1(%rcx,%rcx), %ecx
+; X64-NEXT:    movl %edi, %edx
+; X64-NEXT:    cmovgl %eax, %edx
+; X64-NEXT:    cmovgel %edi, %eax
+; X64-NEXT:    subl %edx, %eax
+; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    shrl %eax
+; X64-NEXT:    imull %ecx, %eax
+; X64-NEXT:    addl %edi, %eax
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i16_signed_reg_mem:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movzwl (%eax), %eax
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:    cmpw %ax, %cx
+; X32-NEXT:    setle %dl
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    jg .LBB13_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %ecx, %esi
+; X32-NEXT:  .LBB13_2:
+; X32-NEXT:    leal -1(%edx,%edx), %edx
+; X32-NEXT:    movl %ecx, %edi
+; X32-NEXT:    jge .LBB13_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movl %eax, %edi
+; X32-NEXT:  .LBB13_4:
+; X32-NEXT:    subl %esi, %edi
+; X32-NEXT:    movzwl %di, %eax
+; X32-NEXT:    shrl %eax
+; X32-NEXT:    imull %edx, %eax
+; X32-NEXT:    addl %ecx, %eax
+; X32-NEXT:    # kill: def $ax killed $ax killed $eax
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl
+  %a2 = load i16, i16* %a2_addr
+  %t3 = icmp sgt i16 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i16 -1, i16 1
+  %t5 = select i1 %t3, i16 %a2, i16 %a1
+  %t6 = select i1 %t3, i16 %a1, i16 %a2
+  %t7 = sub i16 %t6, %t5
+  %t8 = lshr i16 %t7, 1
+  %t9 = mul nsw i16 %t8, %t4 ; signed
+  %a10 = add nsw i16 %t9, %a1 ; signed
+  ret i16 %a10
+}
+
+define i16 @scalar_i16_signed_mem_mem(i16* %a1_addr, i16* %a2_addr) nounwind {
+; X64-LABEL: scalar_i16_signed_mem_mem:
+; X64:       # %bb.0:
+; X64-NEXT:    movzwl (%rdi), %ecx
+; X64-NEXT:    movzwl (%rsi), %eax
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    cmpw %ax, %cx
+; X64-NEXT:    setle %dl
+; X64-NEXT:    leal -1(%rdx,%rdx), %edx
+; X64-NEXT:    movl %ecx, %esi
+; X64-NEXT:    cmovgl %eax, %esi
+; X64-NEXT:    cmovgel %ecx, %eax
+; X64-NEXT:    subl %esi, %eax
+; X64-NEXT:    movzwl %ax, %eax
+; X64-NEXT:    shrl %eax
+; X64-NEXT:    imull %edx, %eax
+; X64-NEXT:    addl %ecx, %eax
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i16_signed_mem_mem:
+; X32:       # %bb.0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movzwl (%ecx), %ecx
+; X32-NEXT:    movzwl (%eax), %eax
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:    cmpw %ax, %cx
+; X32-NEXT:    setle %dl
+; X32-NEXT:    movl %eax, %esi
+; X32-NEXT:    jg .LBB14_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movl %ecx, %esi
+; X32-NEXT:  .LBB14_2:
+; X32-NEXT:    leal -1(%edx,%edx), %edx
+; X32-NEXT:    movl %ecx, %edi
+; X32-NEXT:    jge .LBB14_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movl %eax, %edi
+; X32-NEXT:  .LBB14_4:
+; X32-NEXT:    subl %esi, %edi
+; X32-NEXT:    movzwl %di, %eax
+; X32-NEXT:    shrl %eax
+; X32-NEXT:    imull %edx, %eax
+; X32-NEXT:    addl %ecx, %eax
+; X32-NEXT:    # kill: def $ax killed $ax killed $eax
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl
+  %a1 = load i16, i16* %a1_addr
+  %a2 = load i16, i16* %a2_addr
+  %t3 = icmp sgt i16 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i16 -1, i16 1
+  %t5 = select i1 %t3, i16 %a2, i16 %a1
+  %t6 = select i1 %t3, i16 %a1, i16 %a2
+  %t7 = sub i16 %t6, %t5
+  %t8 = lshr i16 %t7, 1
+  %t9 = mul nsw i16 %t8, %t4 ; signed
+  %a10 = add nsw i16 %t9, %a1 ; signed
+  ret i16 %a10
+}
+
+; ---------------------------------------------------------------------------- ;
+; 8-bit width
+; ---------------------------------------------------------------------------- ;
+
+; Values come from regs
+
+define i8 @scalar_i8_signed_reg_reg(i8 %a1, i8 %a2) nounwind {
+; X64-LABEL: scalar_i8_signed_reg_reg:
+; X64:       # %bb.0:
+; X64-NEXT:    cmpb %sil, %dil
+; X64-NEXT:    setle %cl
+; X64-NEXT:    movl %esi, %edx
+; X64-NEXT:    jg .LBB15_2
+; X64-NEXT:  # %bb.1:
+; X64-NEXT:    movl %edi, %edx
+; X64-NEXT:  .LBB15_2:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    jge .LBB15_4
+; X64-NEXT:  # %bb.3:
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:  .LBB15_4:
+; X64-NEXT:    subb %dl, %al
+; X64-NEXT:    addb %cl, %cl
+; X64-NEXT:    decb %cl
+; X64-NEXT:    shrb %al
+; X64-NEXT:    mulb %cl
+; X64-NEXT:    addb %dil, %al
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i8_signed_reg_reg:
+; X32:       # %bb.0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %ah
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    cmpb %ah, %cl
+; X32-NEXT:    setle %dl
+; X32-NEXT:    movb %ah, %ch
+; X32-NEXT:    jg .LBB15_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movb %cl, %ch
+; X32-NEXT:  .LBB15_2:
+; X32-NEXT:    movb %cl, %al
+; X32-NEXT:    jge .LBB15_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movb %ah, %al
+; X32-NEXT:  .LBB15_4:
+; X32-NEXT:    subb %ch, %al
+; X32-NEXT:    addb %dl, %dl
+; X32-NEXT:    decb %dl
+; X32-NEXT:    shrb %al
+; X32-NEXT:    mulb %dl
+; X32-NEXT:    addb %cl, %al
+; X32-NEXT:    retl
+  %t3 = icmp sgt i8 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i8 -1, i8 1
+  %t5 = select i1 %t3, i8 %a2, i8 %a1
+  %t6 = select i1 %t3, i8 %a1, i8 %a2
+  %t7 = sub i8 %t6, %t5
+  %t8 = lshr i8 %t7, 1
+  %t9 = mul nsw i8 %t8, %t4 ; signed
+  %a10 = add nsw i8 %t9, %a1 ; signed
+  ret i8 %a10
+}
+
+define i8 @scalar_i8_unsigned_reg_reg(i8 %a1, i8 %a2) nounwind {
+; X64-LABEL: scalar_i8_unsigned_reg_reg:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    cmpb %al, %dil
+; X64-NEXT:    setbe %cl
+; X64-NEXT:    ja .LBB16_1
+; X64-NEXT:  # %bb.2:
+; X64-NEXT:    movl %edi, %edx
+; X64-NEXT:    jmp .LBB16_3
+; X64-NEXT:  .LBB16_1:
+; X64-NEXT:    movl %eax, %edx
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:  .LBB16_3:
+; X64-NEXT:    subb %dl, %al
+; X64-NEXT:    addb %cl, %cl
+; X64-NEXT:    decb %cl
+; X64-NEXT:    shrb %al
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    mulb %cl
+; X64-NEXT:    addb %dil, %al
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i8_unsigned_reg_reg:
+; X32:       # %bb.0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    cmpb %al, %cl
+; X32-NEXT:    setbe %dl
+; X32-NEXT:    ja .LBB16_1
+; X32-NEXT:  # %bb.2:
+; X32-NEXT:    movb %cl, %ah
+; X32-NEXT:    jmp .LBB16_3
+; X32-NEXT:  .LBB16_1:
+; X32-NEXT:    movb %al, %ah
+; X32-NEXT:    movb %cl, %al
+; X32-NEXT:  .LBB16_3:
+; X32-NEXT:    subb %ah, %al
+; X32-NEXT:    addb %dl, %dl
+; X32-NEXT:    decb %dl
+; X32-NEXT:    shrb %al
+; X32-NEXT:    mulb %dl
+; X32-NEXT:    addb %cl, %al
+; X32-NEXT:    retl
+  %t3 = icmp ugt i8 %a1, %a2
+  %t4 = select i1 %t3, i8 -1, i8 1
+  %t5 = select i1 %t3, i8 %a2, i8 %a1
+  %t6 = select i1 %t3, i8 %a1, i8 %a2
+  %t7 = sub i8 %t6, %t5
+  %t8 = lshr i8 %t7, 1
+  %t9 = mul i8 %t8, %t4
+  %a10 = add i8 %t9, %a1
+  ret i8 %a10
+}
+
+; Values are loaded. Only check signed case.
+
+define i8 @scalar_i8_signed_mem_reg(i8* %a1_addr, i8 %a2) nounwind {
+; X64-LABEL: scalar_i8_signed_mem_reg:
+; X64:       # %bb.0:
+; X64-NEXT:    movb (%rdi), %cl
+; X64-NEXT:    cmpb %sil, %cl
+; X64-NEXT:    setle %dl
+; X64-NEXT:    movl %esi, %edi
+; X64-NEXT:    jg .LBB17_2
+; X64-NEXT:  # %bb.1:
+; X64-NEXT:    movl %ecx, %edi
+; X64-NEXT:  .LBB17_2:
+; X64-NEXT:    movl %ecx, %eax
+; X64-NEXT:    jge .LBB17_4
+; X64-NEXT:  # %bb.3:
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:  .LBB17_4:
+; X64-NEXT:    subb %dil, %al
+; X64-NEXT:    addb %dl, %dl
+; X64-NEXT:    decb %dl
+; X64-NEXT:    shrb %al
+; X64-NEXT:    mulb %dl
+; X64-NEXT:    addb %cl, %al
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i8_signed_mem_reg:
+; X32:       # %bb.0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %ah
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movb (%ecx), %cl
+; X32-NEXT:    cmpb %ah, %cl
+; X32-NEXT:    setle %dl
+; X32-NEXT:    movb %ah, %ch
+; X32-NEXT:    jg .LBB17_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movb %cl, %ch
+; X32-NEXT:  .LBB17_2:
+; X32-NEXT:    movb %cl, %al
+; X32-NEXT:    jge .LBB17_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movb %ah, %al
+; X32-NEXT:  .LBB17_4:
+; X32-NEXT:    subb %ch, %al
+; X32-NEXT:    addb %dl, %dl
+; X32-NEXT:    decb %dl
+; X32-NEXT:    shrb %al
+; X32-NEXT:    mulb %dl
+; X32-NEXT:    addb %cl, %al
+; X32-NEXT:    retl
+  %a1 = load i8, i8* %a1_addr
+  %t3 = icmp sgt i8 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i8 -1, i8 1
+  %t5 = select i1 %t3, i8 %a2, i8 %a1
+  %t6 = select i1 %t3, i8 %a1, i8 %a2
+  %t7 = sub i8 %t6, %t5
+  %t8 = lshr i8 %t7, 1
+  %t9 = mul nsw i8 %t8, %t4 ; signed
+  %a10 = add nsw i8 %t9, %a1 ; signed
+  ret i8 %a10
+}
+
+define i8 @scalar_i8_signed_reg_mem(i8 %a1, i8* %a2_addr) nounwind {
+; X64-LABEL: scalar_i8_signed_reg_mem:
+; X64:       # %bb.0:
+; X64-NEXT:    movb (%rsi), %dl
+; X64-NEXT:    cmpb %dl, %dil
+; X64-NEXT:    setle %cl
+; X64-NEXT:    movl %edx, %esi
+; X64-NEXT:    jg .LBB18_2
+; X64-NEXT:  # %bb.1:
+; X64-NEXT:    movl %edi, %esi
+; X64-NEXT:  .LBB18_2:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    jge .LBB18_4
+; X64-NEXT:  # %bb.3:
+; X64-NEXT:    movl %edx, %eax
+; X64-NEXT:  .LBB18_4:
+; X64-NEXT:    subb %sil, %al
+; X64-NEXT:    addb %cl, %cl
+; X64-NEXT:    decb %cl
+; X64-NEXT:    shrb %al
+; X64-NEXT:    mulb %cl
+; X64-NEXT:    addb %dil, %al
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i8_signed_reg_mem:
+; X32:       # %bb.0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movb (%eax), %ah
+; X32-NEXT:    cmpb %ah, %cl
+; X32-NEXT:    setle %dl
+; X32-NEXT:    movb %ah, %ch
+; X32-NEXT:    jg .LBB18_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movb %cl, %ch
+; X32-NEXT:  .LBB18_2:
+; X32-NEXT:    movb %cl, %al
+; X32-NEXT:    jge .LBB18_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movb %ah, %al
+; X32-NEXT:  .LBB18_4:
+; X32-NEXT:    subb %ch, %al
+; X32-NEXT:    addb %dl, %dl
+; X32-NEXT:    decb %dl
+; X32-NEXT:    shrb %al
+; X32-NEXT:    mulb %dl
+; X32-NEXT:    addb %cl, %al
+; X32-NEXT:    retl
+  %a2 = load i8, i8* %a2_addr
+  %t3 = icmp sgt i8 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i8 -1, i8 1
+  %t5 = select i1 %t3, i8 %a2, i8 %a1
+  %t6 = select i1 %t3, i8 %a1, i8 %a2
+  %t7 = sub i8 %t6, %t5
+  %t8 = lshr i8 %t7, 1
+  %t9 = mul nsw i8 %t8, %t4 ; signed
+  %a10 = add nsw i8 %t9, %a1 ; signed
+  ret i8 %a10
+}
+
+define i8 @scalar_i8_signed_mem_mem(i8* %a1_addr, i8* %a2_addr) nounwind {
+; X64-LABEL: scalar_i8_signed_mem_mem:
+; X64:       # %bb.0:
+; X64-NEXT:    movb (%rdi), %dil
+; X64-NEXT:    movb (%rsi), %cl
+; X64-NEXT:    cmpb %cl, %dil
+; X64-NEXT:    setle %dl
+; X64-NEXT:    movl %ecx, %esi
+; X64-NEXT:    jg .LBB19_2
+; X64-NEXT:  # %bb.1:
+; X64-NEXT:    movl %edi, %esi
+; X64-NEXT:  .LBB19_2:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    jge .LBB19_4
+; X64-NEXT:  # %bb.3:
+; X64-NEXT:    movl %ecx, %eax
+; X64-NEXT:  .LBB19_4:
+; X64-NEXT:    subb %sil, %al
+; X64-NEXT:    addb %dl, %dl
+; X64-NEXT:    decb %dl
+; X64-NEXT:    shrb %al
+; X64-NEXT:    mulb %dl
+; X64-NEXT:    addb %dil, %al
+; X64-NEXT:    retq
+;
+; X32-LABEL: scalar_i8_signed_mem_mem:
+; X32:       # %bb.0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movb (%ecx), %cl
+; X32-NEXT:    movb (%eax), %ah
+; X32-NEXT:    cmpb %ah, %cl
+; X32-NEXT:    setle %dl
+; X32-NEXT:    movb %ah, %ch
+; X32-NEXT:    jg .LBB19_2
+; X32-NEXT:  # %bb.1:
+; X32-NEXT:    movb %cl, %ch
+; X32-NEXT:  .LBB19_2:
+; X32-NEXT:    movb %cl, %al
+; X32-NEXT:    jge .LBB19_4
+; X32-NEXT:  # %bb.3:
+; X32-NEXT:    movb %ah, %al
+; X32-NEXT:  .LBB19_4:
+; X32-NEXT:    subb %ch, %al
+; X32-NEXT:    addb %dl, %dl
+; X32-NEXT:    decb %dl
+; X32-NEXT:    shrb %al
+; X32-NEXT:    mulb %dl
+; X32-NEXT:    addb %cl, %al
+; X32-NEXT:    retl
+  %a1 = load i8, i8* %a1_addr
+  %a2 = load i8, i8* %a2_addr
+  %t3 = icmp sgt i8 %a1, %a2 ; signed
+  %t4 = select i1 %t3, i8 -1, i8 1
+  %t5 = select i1 %t3, i8 %a2, i8 %a1
+  %t6 = select i1 %t3, i8 %a1, i8 %a2
+  %t7 = sub i8 %t6, %t5
+  %t8 = lshr i8 %t7, 1
+  %t9 = mul nsw i8 %t8, %t4 ; signed
+  %a10 = add nsw i8 %t9, %a1 ; signed
+  ret i8 %a10
+}
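
For reference, every scalar test above encodes the same branch-free midpoint computation: derive a sign from the comparison, subtract the smaller operand from the larger, halve the difference with a logical shift, then add the signed half-difference back to the first operand. A rough C++ sketch of the signed i32 pattern (a hypothetical helper for illustration only, not the actual std::midpoint() implementation):

  int midpoint_like(int a1, int a2) {
    int sign = (a1 > a2) ? -1 : 1;                // %t4
    int lo   = (a1 > a2) ? a2 : a1;               // %t5
    int hi   = (a1 > a2) ? a1 : a2;               // %t6
    // The subtraction is done as unsigned so a wrapped difference
    // still has the right bit pattern (matches the plain `sub`),
    // and the shift is logical (matches `lshr`).
    unsigned diff = (unsigned)hi - (unsigned)lo;  // %t7
    return a1 + sign * (int)(diff >> 1);          // %t8, %t9, %a10
  }

The unsigned variants differ only in using an unsigned comparison (icmp ugt) and omitting the nsw flags; the mem variants load one or both operands before the same sequence.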