[llvm] e887627 - [X86] midpoint-int-vec - cleanup common check prefixes

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 8 13:09:58 PST 2023


Author: Simon Pilgrim
Date: 2023-02-08T21:09:46Z
New Revision: e887627c00134d7180c3aef4e26bd49dbbec9e24

URL: https://github.com/llvm/llvm-project/commit/e887627c00134d7180c3aef4e26bd49dbbec9e24
DIFF: https://github.com/llvm/llvm-project/commit/e887627c00134d7180c3aef4e26bd49dbbec9e24.diff

LOG: [X86] midpoint-int-vec - cleanup common check prefixes

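The midpoint tests previously used separate AVX1-FALLBACK/AVX2-FALLBACK
prefixes for the plain AVX and AVX2 run lines; these are renamed to
AVX1/AVX2 and given a shared AVX prefix, so checks that are identical
for both targets are emitted once under AVX instead of being duplicated
per run line.

As a minimal sketch of how the shared prefix behaves (a hypothetical
example, not part of this commit): given run lines such as

  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx  | FileCheck %s --check-prefixes=AVX,AVX1
  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2

both runs match the common AVX-LABEL/AVX-NEXT lines, while AVX1-/AVX2-
lines constrain only their own run, and update_llc_test_checks.py emits
a single AVX block whenever the two codegen outputs are identical (as
happens for the i64 and i16 cases in the diff below, while the i32 and
i8 cases keep distinct AVX1/AVX2 blocks).
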
Added: 
    

Modified: 
    llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
    llvm/test/CodeGen/X86/midpoint-int-vec-256.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
index bfcb9ce87d18..e1ecfae9b4cb 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-128.ll
@@ -1,12 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX1-FALLBACK
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2-FALLBACK
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop | FileCheck %s --check-prefixes=XOP,XOP-FALLBACK
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512F
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512VL,AVX512VL-FALLBACK
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512BW-FALLBACK
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX512VL,AVX512VLBW
@@ -65,30 +65,30 @@ define <4 x i32> @vec128_i32_signed_reg_reg(<4 x i32> %a1, <4 x i32> %a2) nounwi
 ; SSE41-NEXT:    paddd %xmm2, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
-; AVX2-FALLBACK-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec128_i32_signed_reg_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vec128_i32_signed_reg_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX2-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; AVX2-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX2-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    retq
 ;
 ; XOP-FALLBACK-LABEL: vec128_i32_signed_reg_reg:
 ; XOP-FALLBACK:       # %bb.0:
@@ -228,34 +228,34 @@ define <4 x i32> @vec128_i32_unsigned_reg_reg(<4 x i32> %a1, <4 x i32> %a2) noun
 ; SSE41-NEXT:    paddd %xmm4, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vpminud %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vpminud %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm3
-; AVX2-FALLBACK-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX2-FALLBACK-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX2-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [1,1,1,1]
-; AVX2-FALLBACK-NEXT:    vpor %xmm4, %xmm3, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec128_i32_unsigned_reg_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vec128_i32_unsigned_reg_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm3
+; AVX2-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX2-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm4 = [1,1,1,1]
+; AVX2-NEXT:    vpor %xmm4, %xmm3, %xmm3
+; AVX2-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpsubd %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX2-NEXT:    vpmulld %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    retq
 ;
 ; XOP-FALLBACK-LABEL: vec128_i32_unsigned_reg_reg:
 ; XOP-FALLBACK:       # %bb.0:
@@ -394,32 +394,32 @@ define <4 x i32> @vec128_i32_signed_mem_reg(ptr %a1_addr, <4 x i32> %a2) nounwin
 ; SSE41-NEXT:    paddd %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX2-FALLBACK-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm2
-; AVX2-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
-; AVX2-FALLBACK-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vpsrld $1, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec128_i32_signed_mem_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm2
+; AVX1-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
+; AVX1-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vec128_i32_signed_mem_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX2-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
+; AVX2-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vpsubd %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrld $1, %xmm0, %xmm0
+; AVX2-NEXT:    vpmulld %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    retq
 ;
 ; XOP-FALLBACK-LABEL: vec128_i32_signed_mem_reg:
 ; XOP-FALLBACK:       # %bb.0:
@@ -561,32 +561,32 @@ define <4 x i32> @vec128_i32_signed_reg_mem(<4 x i32> %a1, ptr %a2_addr) nounwin
 ; SSE41-NEXT:    paddd %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX2-FALLBACK-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
-; AVX2-FALLBACK-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec128_i32_signed_reg_mem:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vec128_i32_signed_reg_mem:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX2-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; AVX2-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX2-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    retq
 ;
 ; XOP-FALLBACK-LABEL: vec128_i32_signed_reg_mem:
 ; XOP-FALLBACK:       # %bb.0:
@@ -730,34 +730,34 @@ define <4 x i32> @vec128_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; SSE41-NEXT:    paddd %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-FALLBACK-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
-; AVX2-FALLBACK-NEXT:    vpor %xmm3, %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec128_i32_signed_mem_mem:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX1-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vec128_i32_signed_mem_mem:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX2-NEXT:    vpcmpgtd %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [1,1,1,1]
+; AVX2-NEXT:    vpor %xmm3, %xmm2, %xmm2
+; AVX2-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; AVX2-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX2-NEXT:    vpmulld %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    retq
 ;
 ; XOP-FALLBACK-LABEL: vec128_i32_signed_mem_mem:
 ; XOP-FALLBACK:       # %bb.0:
@@ -947,43 +947,24 @@ define <2 x i64> @vec128_i64_signed_reg_reg(<2 x i64> %a1, <2 x i64> %a2) nounwi
 ; SSE41-NEXT:    paddq %xmm3, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i64_signed_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm4
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i64_signed_reg_reg:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX2-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm4
-; AVX2-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm2
-; AVX2-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX-LABEL: vec128_i64_signed_reg_reg:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
+; AVX-NEXT:    vpsubq %xmm1, %xmm0, %xmm4
+; AVX-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
+; AVX-NEXT:    vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
+; AVX-NEXT:    vpsrlq $1, %xmm1, %xmm2
+; AVX-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; AVX-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
+; AVX-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
+; AVX-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
 ;
 ; XOP-LABEL: vec128_i64_signed_reg_reg:
 ; XOP:       # %bb.0:
@@ -1156,49 +1137,27 @@ define <2 x i64> @vec128_i64_unsigned_reg_reg(<2 x i64> %a1, <2 x i64> %a2) noun
 ; SSE41-NEXT:    paddq %xmm3, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i64_unsigned_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX1-FALLBACK-NEXT:    vpxor %xmm2, %xmm1, %xmm3
-; AVX1-FALLBACK-NEXT:    vpxor %xmm2, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm4
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i64_unsigned_reg_reg:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX2-FALLBACK-NEXT:    vpxor %xmm2, %xmm1, %xmm3
-; AVX2-FALLBACK-NEXT:    vpxor %xmm2, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX2-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm4
-; AVX2-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm2
-; AVX2-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX-LABEL: vec128_i64_unsigned_reg_reg:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX-NEXT:    vpxor %xmm2, %xmm1, %xmm3
+; AVX-NEXT:    vpxor %xmm2, %xmm0, %xmm2
+; AVX-NEXT:    vpcmpgtq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
+; AVX-NEXT:    vpsubq %xmm1, %xmm0, %xmm4
+; AVX-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
+; AVX-NEXT:    vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
+; AVX-NEXT:    vpsrlq $1, %xmm1, %xmm2
+; AVX-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; AVX-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
+; AVX-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
+; AVX-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
 ;
 ; XOP-LABEL: vec128_i64_unsigned_reg_reg:
 ; XOP:       # %bb.0:
@@ -1375,45 +1334,25 @@ define <2 x i64> @vec128_i64_signed_mem_reg(ptr %a1_addr, <2 x i64> %a2) nounwin
 ; SSE41-NEXT:    paddq %xmm3, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i64_signed_mem_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm4
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm4, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i64_signed_mem_reg:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX2-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm4
-; AVX2-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm4, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpsrlq $33, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
-; AVX2-FALLBACK-NEXT:    vpsllq $32, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX-LABEL: vec128_i64_signed_mem_reg:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
+; AVX-NEXT:    vpsubq %xmm0, %xmm1, %xmm4
+; AVX-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vblendvpd %xmm2, %xmm4, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlq $1, %xmm0, %xmm2
+; AVX-NEXT:    vpsrlq $33, %xmm0, %xmm0
+; AVX-NEXT:    vpmuludq %xmm3, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
+; AVX-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
+; AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpaddq %xmm1, %xmm2, %xmm1
+; AVX-NEXT:    vpaddq %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
 ;
 ; XOP-LABEL: vec128_i64_signed_mem_reg:
 ; XOP:       # %bb.0:
@@ -1591,45 +1530,25 @@ define <2 x i64> @vec128_i64_signed_reg_mem(<2 x i64> %a1, ptr %a2_addr) nounwin
 ; SSE41-NEXT:    paddq %xmm3, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i64_signed_reg_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm4
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i64_signed_reg_mem:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX2-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm4
-; AVX2-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm2
-; AVX2-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX-LABEL: vec128_i64_signed_reg_mem:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
+; AVX-NEXT:    vpsubq %xmm1, %xmm0, %xmm4
+; AVX-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
+; AVX-NEXT:    vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
+; AVX-NEXT:    vpsrlq $1, %xmm1, %xmm2
+; AVX-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; AVX-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
+; AVX-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
+; AVX-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
 ;
 ; XOP-LABEL: vec128_i64_signed_reg_mem:
 ; XOP:       # %bb.0:
@@ -1808,47 +1727,26 @@ define <2 x i64> @vec128_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; SSE41-NEXT:    paddq %xmm3, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i64_signed_mem_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm4
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i64_signed_mem_mem:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX2-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm4
-; AVX2-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm2
-; AVX2-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlq $32, %xmm3, %xmm4
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; AVX2-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX-LABEL: vec128_i64_signed_mem_mem:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
+; AVX-NEXT:    vpsubq %xmm1, %xmm0, %xmm4
+; AVX-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
+; AVX-NEXT:    vblendvpd %xmm2, %xmm4, %xmm1, %xmm1
+; AVX-NEXT:    vpsrlq $1, %xmm1, %xmm2
+; AVX-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; AVX-NEXT:    vpmuludq %xmm3, %xmm1, %xmm1
+; AVX-NEXT:    vpsrlq $32, %xmm3, %xmm4
+; AVX-NEXT:    vpmuludq %xmm4, %xmm2, %xmm4
+; AVX-NEXT:    vpaddq %xmm1, %xmm4, %xmm1
+; AVX-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
 ;
 ; XOP-LABEL: vec128_i64_signed_mem_mem:
 ; XOP:       # %bb.0:
@@ -1966,29 +1864,17 @@ define <8 x i16> @vec128_i16_signed_reg_reg(<8 x i16> %a1, <8 x i16> %a2) nounwi
 ; SSE-NEXT:    paddw %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i16_signed_reg_reg:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX-LABEL: vec128_i16_signed_reg_reg:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
 ;
 ; XOP-LABEL: vec128_i16_signed_reg_reg:
 ; XOP:       # %bb.0:
@@ -2100,33 +1986,19 @@ define <8 x i16> @vec128_i16_unsigned_reg_reg(<8 x i16> %a1, <8 x i16> %a2) noun
 ; SSE41-NEXT:    paddw %xmm4, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vpminuw %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i16_unsigned_reg_reg:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vpminuw %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm3
-; AVX2-FALLBACK-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX2-FALLBACK-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX-LABEL: vec128_i16_unsigned_reg_reg:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpminuw %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vpcmpeqw %xmm2, %xmm0, %xmm3
+; AVX-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpsubw %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
 ;
 ; XOP-LABEL: vec128_i16_unsigned_reg_reg:
 ; XOP:       # %bb.0:
@@ -2223,31 +2095,18 @@ define <8 x i16> @vec128_i16_signed_mem_reg(ptr %a1_addr, <8 x i16> %a2) nounwin
 ; SSE-NEXT:    paddw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsw %xmm0, %xmm1, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm3, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i16_signed_mem_reg:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX2-FALLBACK-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm2
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpminsw %xmm0, %xmm1, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxsw %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    vpsubw %xmm3, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX-LABEL: vec128_i16_signed_mem_reg:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX-NEXT:    vpcmpgtw %xmm0, %xmm1, %xmm2
+; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:    vpminsw %xmm0, %xmm1, %xmm3
+; AVX-NEXT:    vpmaxsw %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vpsubw %xmm3, %xmm0, %xmm0
+; AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
 ;
 ; XOP-LABEL: vec128_i16_signed_mem_reg:
 ; XOP:       # %bb.0:
@@ -2344,31 +2203,18 @@ define <8 x i16> @vec128_i16_signed_reg_mem(<8 x i16> %a1, ptr %a2_addr) nounwin
 ; SSE-NEXT:    paddw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i16_signed_reg_mem:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX2-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX-LABEL: vec128_i16_signed_reg_mem:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
 ;
 ; XOP-LABEL: vec128_i16_signed_reg_mem:
 ; XOP:       # %bb.0:
@@ -2466,33 +2312,19 @@ define <8 x i16> @vec128_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; SSE-NEXT:    paddw %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i16_signed_mem_mem:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    retq
+; AVX-LABEL: vec128_i16_signed_mem_mem:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX-NEXT:    vpminsw %xmm1, %xmm0, %xmm3
+; AVX-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX-NEXT:    vpsubw %xmm3, %xmm1, %xmm1
+; AVX-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
 ;
 ; XOP-LABEL: vec128_i16_signed_mem_mem:
 ; XOP:       # %bb.0:
@@ -2643,46 +2475,46 @@ define <16 x i8> @vec128_i8_signed_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounwin
 ; SSE41-NEXT:    paddb %xmm3, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX2-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
-; AVX2-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-FALLBACK-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-FALLBACK-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    vzeroupper
-; AVX2-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec128_i8_signed_reg_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vec128_i8_signed_reg_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
 ;
 ; XOP-FALLBACK-LABEL: vec128_i8_signed_reg_reg:
 ; XOP-FALLBACK:       # %bb.0:
@@ -2870,50 +2702,50 @@ define <16 x i8> @vec128_i8_unsigned_reg_reg(<16 x i8> %a1, <16 x i8> %a2) nounw
 ; SSE41-NEXT:    paddb %xmm2, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vpminub %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vpminub %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm3
-; AVX2-FALLBACK-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
-; AVX2-FALLBACK-NEXT:    vpxor %xmm4, %xmm3, %xmm3
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
-; AVX2-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
-; AVX2-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-FALLBACK-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-FALLBACK-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    vzeroupper
-; AVX2-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec128_i8_unsigned_reg_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-NEXT:    vpmullw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vec128_i8_unsigned_reg_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpminub %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpcmpeqb %xmm2, %xmm0, %xmm3
+; AVX2-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX2-NEXT:    vpxor %xmm4, %xmm3, %xmm3
+; AVX2-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
+; AVX2-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpsubb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
 ;
 ; XOP-FALLBACK-LABEL: vec128_i8_unsigned_reg_reg:
 ; XOP-FALLBACK:       # %bb.0:
@@ -3113,48 +2945,48 @@ define <16 x i8> @vec128_i8_signed_mem_reg(ptr %a1_addr, <16 x i8> %a2) nounwind
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsb %xmm0, %xmm1, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm3, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm3, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX2-FALLBACK-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm2
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpminsb %xmm0, %xmm1, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    vpsubb %xmm3, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX2-FALLBACK-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
-; AVX2-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX2-FALLBACK-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
-; AVX2-FALLBACK-NEXT:    vzeroupper
-; AVX2-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec128_i8_signed_mem_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm2
+; AVX1-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpminsb %xmm0, %xmm1, %xmm3
+; AVX1-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpsubb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT:    vpmullw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vec128_i8_signed_mem_reg:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm2
+; AVX2-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpminsb %xmm0, %xmm1, %xmm3
+; AVX2-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vpsubb %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT:    vpackuswb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
 ;
 ; XOP-FALLBACK-LABEL: vec128_i8_signed_mem_reg:
 ; XOP-FALLBACK:       # %bb.0:
@@ -3355,48 +3187,48 @@ define <16 x i8> @vec128_i8_signed_reg_mem(<16 x i8> %a1, ptr %a2_addr) nounwind
 ; SSE41-NEXT:    paddb %xmm3, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX2-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX2-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
-; AVX2-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-FALLBACK-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-FALLBACK-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    vzeroupper
-; AVX2-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec128_i8_signed_reg_mem:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vec128_i8_signed_reg_mem:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX2-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
 ;
 ; XOP-FALLBACK-LABEL: vec128_i8_signed_reg_mem:
 ; XOP-FALLBACK:       # %bb.0:
@@ -3599,50 +3431,50 @@ define <16 x i8> @vec128_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; SSE41-NEXT:    paddb %xmm1, %xmm0
 ; SSE41-NEXT:    retq
 ;
-; AVX1-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
-; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    retq
-;
-; AVX2-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
-; AVX2-FALLBACK:       # %bb.0:
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX2-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm1
-; AVX2-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
-; AVX2-FALLBACK-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX2-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
-; AVX2-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX2-FALLBACK-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
-; AVX2-FALLBACK-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX2-FALLBACK-NEXT:    vextracti128 $1, %ymm1, %xmm2
-; AVX2-FALLBACK-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
-; AVX2-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; AVX2-FALLBACK-NEXT:    vzeroupper
-; AVX2-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec128_i8_signed_mem_mem:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm3 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT:    vpmullw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpackuswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: vec128_i8_signed_mem_mem:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-NEXT:    vmovdqa (%rsi), %xmm1
+; AVX2-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT:    vpminsb %xmm1, %xmm0, %xmm3
+; AVX2-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX2-NEXT:    vpsubb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-NEXT:    vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
 ;
 ; XOP-FALLBACK-LABEL: vec128_i8_signed_mem_mem:
 ; XOP-FALLBACK:       # %bb.0:

diff  --git a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
index da255045018c..424274361450 100644
--- a/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int-vec-256.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX1-FALLBACK
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop | FileCheck %s --check-prefix=XOP-FALLBACK
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=XOPAVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop | FileCheck %s --check-prefixes=XOP
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL,AVX512VL-FALLBACK
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW-FALLBACK
@@ -21,24 +21,24 @@
 ; Values come from regs
 
 define <8 x i32> @vec256_i32_signed_reg_reg(<8 x i32> %a1, <8 x i32> %a2) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i32_signed_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm2, %xmm3, %xmm2
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpminsd %xmm1, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm2, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i32_signed_reg_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpsubd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpminsd %xmm1, %xmm3, %xmm4
+; AVX1-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT:    vpmulld %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpmulld %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i32_signed_reg_reg:
 ; AVX2:       # %bb.0:
@@ -50,39 +50,22 @@ define <8 x i32> @vec256_i32_signed_reg_reg(<8 x i32> %a1, <8 x i32> %a2) nounwi
 ; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i32_signed_reg_reg:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOP-FALLBACK-NEXT:    vpminsd %xmm2, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
-; XOP-FALLBACK-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm4
-; XOP-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpmacsdd %xmm3, %xmm2, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i32_signed_reg_reg:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOPAVX1-NEXT:    vpminsd %xmm2, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
-; XOPAVX1-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm4
-; XOPAVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
-; XOPAVX1-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrld $1, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpmacsdd %xmm3, %xmm2, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i32_signed_reg_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpminsd %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpminsd %xmm1, %xmm0, %xmm4
+; XOP-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOP-NEXT:    vpsrld $1, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsdd %xmm3, %xmm2, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512-LABEL: vec256_i32_signed_reg_reg:
 ; AVX512:       # %bb.0:
@@ -105,24 +88,24 @@ define <8 x i32> @vec256_i32_signed_reg_reg(<8 x i32> %a1, <8 x i32> %a2) nounwi
 }
 
 define <8 x i32> @vec256_i32_unsigned_reg_reg(<8 x i32> %a1, <8 x i32> %a2) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i32_unsigned_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vpminud %xmm1, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpmaxud %xmm1, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm2, %xmm3, %xmm2
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpminud %xmm1, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmaxud %xmm1, %xmm3, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm2, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i32_unsigned_reg_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpmaxud %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpsubd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpminud %xmm1, %xmm3, %xmm4
+; AVX1-NEXT:    vpmaxud %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT:    vpmulld %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpmulld %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i32_unsigned_reg_reg:
 ; AVX2:       # %bb.0:
@@ -134,39 +117,22 @@ define <8 x i32> @vec256_i32_unsigned_reg_reg(<8 x i32> %a1, <8 x i32> %a2) noun
 ; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i32_unsigned_reg_reg:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOP-FALLBACK-NEXT:    vpminud %xmm2, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpmaxud %xmm2, %xmm3, %xmm2
-; XOP-FALLBACK-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpminud %xmm1, %xmm0, %xmm4
-; XOP-FALLBACK-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpmacsdd %xmm3, %xmm2, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i32_unsigned_reg_reg:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOPAVX1-NEXT:    vpminud %xmm2, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpmaxud %xmm2, %xmm3, %xmm2
-; XOPAVX1-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm4
-; XOPAVX1-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
-; XOPAVX1-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrld $1, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpmacsdd %xmm3, %xmm2, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i32_unsigned_reg_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpminud %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpmaxud %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpminud %xmm1, %xmm0, %xmm4
+; XOP-NEXT:    vpmaxud %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOP-NEXT:    vpsrld $1, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsdd %xmm3, %xmm2, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512-LABEL: vec256_i32_unsigned_reg_reg:
 ; AVX512:       # %bb.0:
@@ -191,25 +157,25 @@ define <8 x i32> @vec256_i32_unsigned_reg_reg(<8 x i32> %a1, <8 x i32> %a2) noun
 ; Values are loaded. Only check signed case.
 
 define <8 x i32> @vec256_i32_signed_mem_reg(ptr %a1_addr, <8 x i32> %a2) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i32_signed_mem_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm4
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm3, %xmm4, %xmm3
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm3, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm0, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm1, %xmm3, %xmm1
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i32_signed_mem_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-NEXT:    vpminsd %xmm0, %xmm1, %xmm3
+; AVX1-NEXT:    vpmaxsd %xmm0, %xmm1, %xmm4
+; AVX1-NEXT:    vpsubd %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
+; AVX1-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT:    vpmulld %xmm3, %xmm3, %xmm3
+; AVX1-NEXT:    vpmulld %xmm0, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpaddd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i32_signed_mem_reg:
 ; AVX2:       # %bb.0:
@@ -222,41 +188,23 @@ define <8 x i32> @vec256_i32_signed_mem_reg(ptr %a1_addr, <8 x i32> %a2) nounwin
 ; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i32_signed_mem_reg:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-FALLBACK-NEXT:    vpminsd %xmm1, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
-; XOP-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
-; XOP-FALLBACK-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpmacsdd %xmm3, %xmm1, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpmacsdd %xmm2, %xmm0, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i32_signed_mem_reg:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOPAVX1-NEXT:    vpminsd %xmm1, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
-; XOPAVX1-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
-; XOPAVX1-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
-; XOPAVX1-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpmacsdd %xmm3, %xmm1, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpmacsdd %xmm2, %xmm0, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i32_signed_mem_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; XOP-NEXT:    vmovdqa (%rdi), %xmm2
+; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-NEXT:    vpminsd %xmm1, %xmm3, %xmm4
+; XOP-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
+; XOP-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
+; XOP-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
+; XOP-NEXT:    vpsrld $1, %xmm0, %xmm0
+; XOP-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOP-NEXT:    vpmacsdd %xmm3, %xmm1, %xmm1, %xmm1
+; XOP-NEXT:    vpmacsdd %xmm2, %xmm0, %xmm0, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512-LABEL: vec256_i32_signed_mem_reg:
 ; AVX512:       # %bb.0:
@@ -281,25 +229,25 @@ define <8 x i32> @vec256_i32_signed_mem_reg(ptr %a1_addr, <8 x i32> %a2) nounwin
 }
 
 define <8 x i32> @vec256_i32_signed_reg_mem(<8 x i32> %a1, ptr %a2_addr) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i32_signed_reg_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpminsd %xmm2, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm2, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i32_signed_reg_mem:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpminsd %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrld $1, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpmulld %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpmulld %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i32_signed_reg_mem:
 ; AVX2:       # %bb.0:
@@ -312,41 +260,23 @@ define <8 x i32> @vec256_i32_signed_reg_mem(<8 x i32> %a1, ptr %a2_addr) nounwin
 ; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i32_signed_reg_mem:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOP-FALLBACK-NEXT:    vpminsd %xmm2, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
-; XOP-FALLBACK-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpminsd %xmm1, %xmm0, %xmm4
-; XOP-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpmacsdd %xmm3, %xmm2, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i32_signed_reg_mem:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOPAVX1-NEXT:    vpminsd %xmm2, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
-; XOPAVX1-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpminsd %xmm1, %xmm0, %xmm4
-; XOPAVX1-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
-; XOPAVX1-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrld $1, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpmacsdd %xmm3, %xmm2, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i32_signed_reg_mem:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpminsd %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpmaxsd %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubd %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpminsd %xmm1, %xmm0, %xmm4
+; XOP-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOP-NEXT:    vpsrld $1, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsdd %xmm3, %xmm2, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsdd %xmm0, %xmm1, %xmm1, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512-LABEL: vec256_i32_signed_reg_mem:
 ; AVX512:       # %bb.0:
@@ -371,26 +301,26 @@ define <8 x i32> @vec256_i32_signed_reg_mem(<8 x i32> %a1, ptr %a2_addr) nounwin
 }
 
 define <8 x i32> @vec256_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i32_signed_mem_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm0
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-FALLBACK-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpminsd %xmm1, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrld $1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm0, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpmulld %xmm1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i32_signed_mem_mem:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm1
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
+; AVX1-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
+; AVX1-NEXT:    vpminsd %xmm1, %xmm3, %xmm4
+; AVX1-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT:    vpmulld %xmm0, %xmm0, %xmm0
+; AVX1-NEXT:    vpmulld %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i32_signed_mem_mem:
 ; AVX2:       # %bb.0:
@@ -404,43 +334,24 @@ define <8 x i32> @vec256_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i32_signed_mem_mem:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm0
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-FALLBACK-NEXT:    vpminsd %xmm1, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
-; XOP-FALLBACK-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
-; XOP-FALLBACK-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpsrld $1, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpmacsdd %xmm3, %xmm1, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpmacsdd %xmm2, %xmm0, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i32_signed_mem_mem:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vmovdqa (%rsi), %xmm0
-; XOPAVX1-NEXT:    vmovdqa 16(%rsi), %xmm1
-; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOPAVX1-NEXT:    vpminsd %xmm1, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
-; XOPAVX1-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
-; XOPAVX1-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
-; XOPAVX1-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpsrld $1, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpmacsdd %xmm3, %xmm1, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpmacsdd %xmm2, %xmm0, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i32_signed_mem_mem:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovdqa (%rsi), %xmm0
+; XOP-NEXT:    vmovdqa 16(%rsi), %xmm1
+; XOP-NEXT:    vmovdqa (%rdi), %xmm2
+; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-NEXT:    vpminsd %xmm1, %xmm3, %xmm4
+; XOP-NEXT:    vpmaxsd %xmm1, %xmm3, %xmm1
+; XOP-NEXT:    vpsubd %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpminsd %xmm0, %xmm2, %xmm4
+; XOP-NEXT:    vpmaxsd %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vpsubd %xmm4, %xmm0, %xmm0
+; XOP-NEXT:    vpsrld $1, %xmm0, %xmm0
+; XOP-NEXT:    vpsrld $1, %xmm1, %xmm1
+; XOP-NEXT:    vpmacsdd %xmm3, %xmm1, %xmm1, %xmm1
+; XOP-NEXT:    vpmacsdd %xmm2, %xmm0, %xmm0, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512-LABEL: vec256_i32_signed_mem_mem:
 ; AVX512:       # %bb.0:
@@ -473,43 +384,43 @@ define <8 x i32> @vec256_i32_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; Values come from regs
 
 define <4 x i64> @vec256_i64_signed_reg_reg(<4 x i64> %a1, <4 x i64> %a2) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i64_signed_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm5
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm2, %xmm3, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm7
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm9
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i64_signed_reg_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
+; AVX1-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm6
+; AVX1-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlq $1, %xmm2, %xmm6
+; AVX1-NEXT:    vpsrlq $1, %xmm1, %xmm7
+; AVX1-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; AVX1-NEXT:    vpor %xmm5, %xmm8, %xmm5
+; AVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq $32, %xmm5, %xmm9
+; AVX1-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
+; AVX1-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
+; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vpsrlq $33, %xmm2, %xmm2
+; AVX1-NEXT:    vpor %xmm4, %xmm8, %xmm4
+; AVX1-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlq $32, %xmm4, %xmm7
+; AVX1-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
+; AVX1-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
+; AVX1-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX1-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i64_signed_reg_reg:
 ; AVX2:       # %bb.0:
@@ -531,81 +442,43 @@ define <4 x i64> @vec256_i64_signed_reg_reg(<4 x i64> %a1, <4 x i64> %a2) nounwi
 ; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i64_signed_reg_reg:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
-; XOP-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
-; XOP-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubq %xmm2, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm6
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm7
-; XOP-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm9
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
-; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; XOP-FALLBACK-NEXT:    vpsrlq $33, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm7
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
-; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i64_signed_reg_reg:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOPAVX1-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
-; XOPAVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
-; XOPAVX1-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm2, %xmm6
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm1, %xmm7
-; XOPAVX1-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOPAVX1-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm9
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
-; XOPAVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; XOPAVX1-NEXT:    vpsrlq $33, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm4, %xmm7
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
-; XOPAVX1-NEXT:    vpsllq $32, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
-; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i64_signed_reg_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
+; XOP-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
+; XOP-NEXT:    vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpsubq %xmm2, %xmm3, %xmm6
+; XOP-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
+; XOP-NEXT:    vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpsrlq $1, %xmm2, %xmm6
+; XOP-NEXT:    vpsrlq $1, %xmm1, %xmm7
+; XOP-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOP-NEXT:    vpor %xmm5, %xmm8, %xmm5
+; XOP-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $32, %xmm5, %xmm9
+; XOP-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
+; XOP-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
+; XOP-NEXT:    vpsllq $32, %xmm1, %xmm1
+; XOP-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
+; XOP-NEXT:    vpsrlq $33, %xmm2, %xmm2
+; XOP-NEXT:    vpor %xmm4, %xmm8, %xmm4
+; XOP-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpsrlq $32, %xmm4, %xmm7
+; XOP-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
+; XOP-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
+; XOP-NEXT:    vpsllq $32, %xmm2, %xmm2
+; XOP-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
+; XOP-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
+; XOP-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
+; XOP-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i64_signed_reg_reg:
 ; AVX512F:       # %bb.0:
@@ -676,48 +549,48 @@ define <4 x i64> @vec256_i64_signed_reg_reg(<4 x i64> %a1, <4 x i64> %a2) nounwi
 }
 
 define <4 x i64> @vec256_i64_unsigned_reg_reg(<4 x i64> %a1, <4 x i64> %a2) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i64_unsigned_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-FALLBACK-NEXT:    vpxor %xmm4, %xmm3, %xmm5
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpxor %xmm4, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-FALLBACK-NEXT:    vpxor %xmm4, %xmm1, %xmm6
-; AVX1-FALLBACK-NEXT:    vpxor %xmm4, %xmm0, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm2, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm6, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm3, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm7
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm9
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm7, %xmm4
-; AVX1-FALLBACK-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm7, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm6, %xmm5
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i64_unsigned_reg_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT:    vpxor %xmm4, %xmm3, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpxor %xmm4, %xmm2, %xmm6
+; AVX1-NEXT:    vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpxor %xmm4, %xmm1, %xmm6
+; AVX1-NEXT:    vpxor %xmm4, %xmm0, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
+; AVX1-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubq %xmm3, %xmm2, %xmm6
+; AVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm3
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlq $1, %xmm3, %xmm6
+; AVX1-NEXT:    vpsrlq $1, %xmm1, %xmm7
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; AVX1-NEXT:    vpor %xmm4, %xmm8, %xmm4
+; AVX1-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; AVX1-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq $32, %xmm4, %xmm9
+; AVX1-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
+; AVX1-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
+; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT:    vpmuludq %xmm4, %xmm7, %xmm4
+; AVX1-NEXT:    vpor %xmm5, %xmm8, %xmm5
+; AVX1-NEXT:    vpsrlq $33, %xmm3, %xmm3
+; AVX1-NEXT:    vpmuludq %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; AVX1-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
+; AVX1-NEXT:    vpaddq %xmm3, %xmm7, %xmm3
+; AVX1-NEXT:    vpsllq $32, %xmm3, %xmm3
+; AVX1-NEXT:    vpmuludq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpaddq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i64_unsigned_reg_reg:
 ; AVX2:       # %bb.0:
@@ -742,81 +615,43 @@ define <4 x i64> @vec256_i64_unsigned_reg_reg(<4 x i64> %a1, <4 x i64> %a2) noun
 ; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i64_unsigned_reg_reg:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtuq %xmm2, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtuq %xmm1, %xmm0, %xmm5
-; XOP-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
-; XOP-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubq %xmm2, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm6
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm7
-; XOP-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm9
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
-; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; XOP-FALLBACK-NEXT:    vpsrlq $33, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm7
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
-; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i64_unsigned_reg_reg:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOPAVX1-NEXT:    vpcomgtuq %xmm2, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtuq %xmm1, %xmm0, %xmm5
-; XOPAVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
-; XOPAVX1-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm2, %xmm6
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm1, %xmm7
-; XOPAVX1-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOPAVX1-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm9
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
-; XOPAVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; XOPAVX1-NEXT:    vpsrlq $33, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm4, %xmm7
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
-; XOPAVX1-NEXT:    vpsllq $32, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
-; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i64_unsigned_reg_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpcomgtuq %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtuq %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
+; XOP-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
+; XOP-NEXT:    vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpsubq %xmm2, %xmm3, %xmm6
+; XOP-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
+; XOP-NEXT:    vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpsrlq $1, %xmm2, %xmm6
+; XOP-NEXT:    vpsrlq $1, %xmm1, %xmm7
+; XOP-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOP-NEXT:    vpor %xmm5, %xmm8, %xmm5
+; XOP-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $32, %xmm5, %xmm9
+; XOP-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
+; XOP-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
+; XOP-NEXT:    vpsllq $32, %xmm1, %xmm1
+; XOP-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
+; XOP-NEXT:    vpsrlq $33, %xmm2, %xmm2
+; XOP-NEXT:    vpor %xmm4, %xmm8, %xmm4
+; XOP-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpsrlq $32, %xmm4, %xmm7
+; XOP-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
+; XOP-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
+; XOP-NEXT:    vpsllq $32, %xmm2, %xmm2
+; XOP-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
+; XOP-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
+; XOP-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
+; XOP-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i64_unsigned_reg_reg:
 ; AVX512F:       # %bb.0:
@@ -889,44 +724,44 @@ define <4 x i64> @vec256_i64_unsigned_reg_reg(<4 x i64> %a1, <4 x i64> %a2) noun
 ; Values are loaded. Only check signed case.
 
 define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i64_signed_mem_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm5
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm0, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm6, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm1, %xmm3, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm7
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm9
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm9, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm7, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i64_signed_mem_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm5
+; AVX1-NEXT:    vpsubq %xmm0, %xmm2, %xmm6
+; AVX1-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vpsubq %xmm1, %xmm3, %xmm6
+; AVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq $1, %xmm1, %xmm6
+; AVX1-NEXT:    vpsrlq $1, %xmm0, %xmm7
+; AVX1-NEXT:    vpsrlq $33, %xmm0, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; AVX1-NEXT:    vpor %xmm5, %xmm8, %xmm5
+; AVX1-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlq $32, %xmm5, %xmm9
+; AVX1-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
+; AVX1-NEXT:    vpaddq %xmm0, %xmm9, %xmm0
+; AVX1-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm4, %xmm8, %xmm4
+; AVX1-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq $32, %xmm4, %xmm7
+; AVX1-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
+; AVX1-NEXT:    vpaddq %xmm1, %xmm7, %xmm1
+; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i64_signed_mem_reg:
 ; AVX2:       # %bb.0:
@@ -949,83 +784,44 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
 ; AVX2-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i64_signed_mem_reg:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm1, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm0, %xmm2, %xmm5
-; XOP-FALLBACK-NEXT:    vpsubq %xmm0, %xmm2, %xmm6
-; XOP-FALLBACK-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm6, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpsubq %xmm1, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm6
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm7
-; XOP-FALLBACK-NEXT:    vpsrlq $33, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm9
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm9, %xmm0
-; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; XOP-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm7
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm7, %xmm1
-; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i64_signed_mem_reg:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOPAVX1-NEXT:    vpcomgtq %xmm1, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtq %xmm0, %xmm2, %xmm5
-; XOPAVX1-NEXT:    vpsubq %xmm0, %xmm2, %xmm6
-; XOPAVX1-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpsubq %xmm1, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm1, %xmm6
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm0, %xmm7
-; XOPAVX1-NEXT:    vpsrlq $33, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOPAVX1-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm9
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm9, %xmm0
-; XOPAVX1-NEXT:    vpsllq $32, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; XOPAVX1-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm4, %xmm7
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm7, %xmm1
-; XOPAVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i64_signed_mem_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; XOP-NEXT:    vmovdqa (%rdi), %xmm2
+; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-NEXT:    vpcomgtq %xmm1, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtq %xmm0, %xmm2, %xmm5
+; XOP-NEXT:    vpsubq %xmm0, %xmm2, %xmm6
+; XOP-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
+; XOP-NEXT:    vblendvpd %xmm5, %xmm6, %xmm0, %xmm0
+; XOP-NEXT:    vpsubq %xmm1, %xmm3, %xmm6
+; XOP-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; XOP-NEXT:    vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $1, %xmm1, %xmm6
+; XOP-NEXT:    vpsrlq $1, %xmm0, %xmm7
+; XOP-NEXT:    vpsrlq $33, %xmm0, %xmm0
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOP-NEXT:    vpor %xmm5, %xmm8, %xmm5
+; XOP-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
+; XOP-NEXT:    vpsrlq $32, %xmm5, %xmm9
+; XOP-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
+; XOP-NEXT:    vpaddq %xmm0, %xmm9, %xmm0
+; XOP-NEXT:    vpsllq $32, %xmm0, %xmm0
+; XOP-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
+; XOP-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; XOP-NEXT:    vpor %xmm4, %xmm8, %xmm4
+; XOP-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $32, %xmm4, %xmm7
+; XOP-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
+; XOP-NEXT:    vpaddq %xmm1, %xmm7, %xmm1
+; XOP-NEXT:    vpsllq $32, %xmm1, %xmm1
+; XOP-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
+; XOP-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
+; XOP-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
+; XOP-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
+; XOP-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i64_signed_mem_reg:
 ; AVX512F:       # %bb.0:
@@ -1098,44 +894,44 @@ define <4 x i64> @vec256_i64_signed_mem_reg(ptr %a1_addr, <4 x i64> %a2) nounwin
 }
 
 define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i64_signed_reg_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm5
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm2, %xmm3, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm7
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm9
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i64_signed_reg_mem:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
+; AVX1-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm6
+; AVX1-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlq $1, %xmm2, %xmm6
+; AVX1-NEXT:    vpsrlq $1, %xmm1, %xmm7
+; AVX1-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; AVX1-NEXT:    vpor %xmm5, %xmm8, %xmm5
+; AVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq $32, %xmm5, %xmm9
+; AVX1-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
+; AVX1-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
+; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vpsrlq $33, %xmm2, %xmm2
+; AVX1-NEXT:    vpor %xmm4, %xmm8, %xmm4
+; AVX1-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlq $32, %xmm4, %xmm7
+; AVX1-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
+; AVX1-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
+; AVX1-NEXT:    vpsllq $32, %xmm2, %xmm2
+; AVX1-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i64_signed_reg_mem:
 ; AVX2:       # %bb.0:
@@ -1158,83 +954,44 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
 ; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i64_signed_reg_mem:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
-; XOP-FALLBACK-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
-; XOP-FALLBACK-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubq %xmm2, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm2, %xmm6
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm7
-; XOP-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm9
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
-; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; XOP-FALLBACK-NEXT:    vpsrlq $33, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm7
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
-; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i64_signed_reg_mem:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOPAVX1-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
-; XOPAVX1-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
-; XOPAVX1-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsubq %xmm2, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm2, %xmm6
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm1, %xmm7
-; XOPAVX1-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOPAVX1-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm9
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
-; XOPAVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; XOPAVX1-NEXT:    vpsrlq $33, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm4, %xmm7
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
-; XOPAVX1-NEXT:    vpsllq $32, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
-; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i64_signed_reg_mem:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpcomgtq %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtq %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpsubq %xmm1, %xmm0, %xmm6
+; XOP-NEXT:    vpsubq %xmm0, %xmm1, %xmm1
+; XOP-NEXT:    vblendvpd %xmm5, %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpsubq %xmm2, %xmm3, %xmm6
+; XOP-NEXT:    vpsubq %xmm3, %xmm2, %xmm2
+; XOP-NEXT:    vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpsrlq $1, %xmm2, %xmm6
+; XOP-NEXT:    vpsrlq $1, %xmm1, %xmm7
+; XOP-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOP-NEXT:    vpor %xmm5, %xmm8, %xmm5
+; XOP-NEXT:    vpmuludq %xmm5, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $32, %xmm5, %xmm9
+; XOP-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
+; XOP-NEXT:    vpaddq %xmm1, %xmm9, %xmm1
+; XOP-NEXT:    vpsllq $32, %xmm1, %xmm1
+; XOP-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
+; XOP-NEXT:    vpsrlq $33, %xmm2, %xmm2
+; XOP-NEXT:    vpor %xmm4, %xmm8, %xmm4
+; XOP-NEXT:    vpmuludq %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpsrlq $32, %xmm4, %xmm7
+; XOP-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
+; XOP-NEXT:    vpaddq %xmm2, %xmm7, %xmm2
+; XOP-NEXT:    vpsllq $32, %xmm2, %xmm2
+; XOP-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
+; XOP-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
+; XOP-NEXT:    vpaddq %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpaddq %xmm0, %xmm5, %xmm0
+; XOP-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i64_signed_reg_mem:
 ; AVX512F:       # %bb.0:
@@ -1307,45 +1064,45 @@ define <4 x i64> @vec256_i64_signed_reg_mem(<4 x i64> %a1, ptr %a2_addr) nounwin
 }
 
 define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i64_signed_mem_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm0
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm5
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm0, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm6, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm1, %xmm3, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm6
-; AVX1-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm7
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm9
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm9, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; AVX1-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm7
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm7, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i64_signed_mem_mem:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm1
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-NEXT:    vpcmpgtq %xmm1, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtq %xmm0, %xmm2, %xmm5
+; AVX1-NEXT:    vpsubq %xmm0, %xmm2, %xmm6
+; AVX1-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vpsubq %xmm1, %xmm3, %xmm6
+; AVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq $1, %xmm1, %xmm6
+; AVX1-NEXT:    vpsrlq $1, %xmm0, %xmm7
+; AVX1-NEXT:    vpsrlq $33, %xmm0, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; AVX1-NEXT:    vpor %xmm5, %xmm8, %xmm5
+; AVX1-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vpsrlq $32, %xmm5, %xmm9
+; AVX1-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
+; AVX1-NEXT:    vpaddq %xmm0, %xmm9, %xmm0
+; AVX1-NEXT:    vpsllq $32, %xmm0, %xmm0
+; AVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
+; AVX1-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm4, %xmm8, %xmm4
+; AVX1-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlq $32, %xmm4, %xmm7
+; AVX1-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
+; AVX1-NEXT:    vpaddq %xmm1, %xmm7, %xmm1
+; AVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
+; AVX1-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
+; AVX1-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i64_signed_mem_mem:
 ; AVX2:       # %bb.0:
@@ -1369,85 +1126,45 @@ define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; AVX2-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i64_signed_mem_mem:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm0
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm1, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtq %xmm0, %xmm2, %xmm5
-; XOP-FALLBACK-NEXT:    vpsubq %xmm0, %xmm2, %xmm6
-; XOP-FALLBACK-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vblendvpd %xmm5, %xmm6, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpsubq %xmm1, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm1, %xmm6
-; XOP-FALLBACK-NEXT:    vpsrlq $1, %xmm0, %xmm7
-; XOP-FALLBACK-NEXT:    vpsrlq $33, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm5, %xmm9
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm9, %xmm0
-; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; XOP-FALLBACK-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlq $32, %xmm4, %xmm7
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm7, %xmm1
-; XOP-FALLBACK-NEXT:    vpsllq $32, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; XOP-FALLBACK-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; XOP-FALLBACK-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; XOP-FALLBACK-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i64_signed_mem_mem:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vmovdqa (%rsi), %xmm0
-; XOPAVX1-NEXT:    vmovdqa 16(%rsi), %xmm1
-; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOPAVX1-NEXT:    vpcomgtq %xmm1, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtq %xmm0, %xmm2, %xmm5
-; XOPAVX1-NEXT:    vpsubq %xmm0, %xmm2, %xmm6
-; XOPAVX1-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vblendvpd %xmm5, %xmm6, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpsubq %xmm1, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm1, %xmm6
-; XOPAVX1-NEXT:    vpsrlq $1, %xmm0, %xmm7
-; XOPAVX1-NEXT:    vpsrlq $33, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
-; XOPAVX1-NEXT:    vpor %xmm5, %xmm8, %xmm5
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm5, %xmm9
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
-; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm9, %xmm0
-; XOPAVX1-NEXT:    vpsllq $32, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
-; XOPAVX1-NEXT:    vpsrlq $33, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpor %xmm4, %xmm8, %xmm4
-; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlq $32, %xmm4, %xmm7
-; XOPAVX1-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm7, %xmm1
-; XOPAVX1-NEXT:    vpsllq $32, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
-; XOPAVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
-; XOPAVX1-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
-; XOPAVX1-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
-; XOPAVX1-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i64_signed_mem_mem:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovdqa (%rsi), %xmm0
+; XOP-NEXT:    vmovdqa 16(%rsi), %xmm1
+; XOP-NEXT:    vmovdqa (%rdi), %xmm2
+; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-NEXT:    vpcomgtq %xmm1, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtq %xmm0, %xmm2, %xmm5
+; XOP-NEXT:    vpsubq %xmm0, %xmm2, %xmm6
+; XOP-NEXT:    vpsubq %xmm2, %xmm0, %xmm0
+; XOP-NEXT:    vblendvpd %xmm5, %xmm6, %xmm0, %xmm0
+; XOP-NEXT:    vpsubq %xmm1, %xmm3, %xmm6
+; XOP-NEXT:    vpsubq %xmm3, %xmm1, %xmm1
+; XOP-NEXT:    vblendvpd %xmm4, %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $1, %xmm1, %xmm6
+; XOP-NEXT:    vpsrlq $1, %xmm0, %xmm7
+; XOP-NEXT:    vpsrlq $33, %xmm0, %xmm0
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm8 = [1,1]
+; XOP-NEXT:    vpor %xmm5, %xmm8, %xmm5
+; XOP-NEXT:    vpmuludq %xmm5, %xmm0, %xmm0
+; XOP-NEXT:    vpsrlq $32, %xmm5, %xmm9
+; XOP-NEXT:    vpmuludq %xmm7, %xmm9, %xmm9
+; XOP-NEXT:    vpaddq %xmm0, %xmm9, %xmm0
+; XOP-NEXT:    vpsllq $32, %xmm0, %xmm0
+; XOP-NEXT:    vpmuludq %xmm5, %xmm7, %xmm5
+; XOP-NEXT:    vpsrlq $33, %xmm1, %xmm1
+; XOP-NEXT:    vpor %xmm4, %xmm8, %xmm4
+; XOP-NEXT:    vpmuludq %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlq $32, %xmm4, %xmm7
+; XOP-NEXT:    vpmuludq %xmm7, %xmm6, %xmm7
+; XOP-NEXT:    vpaddq %xmm1, %xmm7, %xmm1
+; XOP-NEXT:    vpsllq $32, %xmm1, %xmm1
+; XOP-NEXT:    vpmuludq %xmm4, %xmm6, %xmm4
+; XOP-NEXT:    vpaddq %xmm3, %xmm4, %xmm3
+; XOP-NEXT:    vpaddq %xmm1, %xmm3, %xmm1
+; XOP-NEXT:    vpaddq %xmm2, %xmm5, %xmm2
+; XOP-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i64_signed_mem_mem:
 ; AVX512F:       # %bb.0:
@@ -1528,29 +1245,29 @@ define <4 x i64> @vec256_i64_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; Values come from regs
 
 define <16 x i16> @vec256_i16_signed_reg_reg(<16 x i16> %a1, <16 x i16> %a2) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i16_signed_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm5
-; AVX1-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i16_signed_reg_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
+; AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
+; AVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i16_signed_reg_reg:
 ; AVX2:       # %bb.0:
@@ -1564,49 +1281,27 @@ define <16 x i16> @vec256_i16_signed_reg_reg(<16 x i16> %a1, <16 x i16> %a2) nou
 ; AVX2-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i16_signed_reg_reg:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtw %xmm2, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm5
-; XOP-FALLBACK-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
-; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i16_signed_reg_reg:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOPAVX1-NEXT:    vpcomgtw %xmm2, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm5
-; XOPAVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
-; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
-; XOPAVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
-; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
-; XOPAVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; XOPAVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i16_signed_reg_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpcomgtw %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
+; XOP-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
+; XOP-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOP-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOP-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOP-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i16_signed_reg_reg:
 ; AVX512F:       # %bb.0:
@@ -1671,32 +1366,32 @@ define <16 x i16> @vec256_i16_signed_reg_reg(<16 x i16> %a1, <16 x i16> %a2) nou
 }
 
 define <16 x i16> @vec256_i16_unsigned_reg_reg(<16 x i16> %a1, <16 x i16> %a2) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i16_unsigned_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpminuw %xmm2, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm5
-; AVX1-FALLBACK-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
-; AVX1-FALLBACK-NEXT:    vpxor %xmm6, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpminuw %xmm1, %xmm0, %xmm7
-; AVX1-FALLBACK-NEXT:    vpcmpeqw %xmm7, %xmm0, %xmm8
-; AVX1-FALLBACK-NEXT:    vpxor %xmm6, %xmm8, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm7, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmaxuw %xmm2, %xmm3, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm4, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm4, %xmm6, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm6, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpor %xmm4, %xmm5, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i16_unsigned_reg_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpminuw %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm5
+; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpxor %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpminuw %xmm1, %xmm0, %xmm7
+; AVX1-NEXT:    vpcmpeqw %xmm7, %xmm0, %xmm8
+; AVX1-NEXT:    vpxor %xmm6, %xmm8, %xmm6
+; AVX1-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubw %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vpmaxuw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpor %xmm4, %xmm6, %xmm6
+; AVX1-NEXT:    vpmullw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i16_unsigned_reg_reg:
 ; AVX2:       # %bb.0:
@@ -1712,49 +1407,27 @@ define <16 x i16> @vec256_i16_unsigned_reg_reg(<16 x i16> %a1, <16 x i16> %a2) n
 ; AVX2-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i16_unsigned_reg_reg:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtuw %xmm2, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtuw %xmm1, %xmm0, %xmm5
-; XOP-FALLBACK-NEXT:    vpminuw %xmm2, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxuw %xmm2, %xmm3, %xmm2
-; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpminuw %xmm1, %xmm0, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i16_unsigned_reg_reg:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOPAVX1-NEXT:    vpcomgtuw %xmm2, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtuw %xmm1, %xmm0, %xmm5
-; XOPAVX1-NEXT:    vpminuw %xmm2, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpmaxuw %xmm2, %xmm3, %xmm2
-; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpminuw %xmm1, %xmm0, %xmm6
-; XOPAVX1-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
-; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
-; XOPAVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; XOPAVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i16_unsigned_reg_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpcomgtuw %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtuw %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpminuw %xmm2, %xmm3, %xmm6
+; XOP-NEXT:    vpmaxuw %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpminuw %xmm1, %xmm0, %xmm6
+; XOP-NEXT:    vpmaxuw %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOP-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOP-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOP-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i16_unsigned_reg_reg:
 ; AVX512F:       # %bb.0:
@@ -1823,30 +1496,30 @@ define <16 x i16> @vec256_i16_unsigned_reg_reg(<16 x i16> %a1, <16 x i16> %a2) n
 ; Values are loaded. Only check signed case.
 
 define <16 x i16> @vec256_i16_signed_mem_reg(ptr %a1_addr, <16 x i16> %a2) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i16_signed_mem_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm0, %xmm2, %xmm5
-; AVX1-FALLBACK-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i16_signed_mem_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtw %xmm0, %xmm2, %xmm5
+; AVX1-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
+; AVX1-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
+; AVX1-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpmullw %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i16_signed_mem_reg:
 ; AVX2:       # %bb.0:
@@ -1861,51 +1534,28 @@ define <16 x i16> @vec256_i16_signed_mem_reg(ptr %a1_addr, <16 x i16> %a2) nounw
 ; AVX2-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i16_signed_mem_reg:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtw %xmm1, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtw %xmm0, %xmm2, %xmm5
-; XOP-FALLBACK-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
-; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpmacsww %xmm2, %xmm5, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i16_signed_mem_reg:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOPAVX1-NEXT:    vpcomgtw %xmm1, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtw %xmm0, %xmm2, %xmm5
-; XOPAVX1-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
-; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
-; XOPAVX1-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
-; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
-; XOPAVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; XOPAVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpmacsww %xmm2, %xmm5, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i16_signed_mem_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; XOP-NEXT:    vmovdqa (%rdi), %xmm2
+; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-NEXT:    vpcomgtw %xmm1, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtw %xmm0, %xmm2, %xmm5
+; XOP-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
+; XOP-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
+; XOP-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
+; XOP-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
+; XOP-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; XOP-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOP-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOP-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOP-NEXT:    vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpmacsww %xmm2, %xmm5, %xmm0, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i16_signed_mem_reg:
 ; AVX512F:       # %bb.0:
@@ -1974,30 +1624,30 @@ define <16 x i16> @vec256_i16_signed_mem_reg(ptr %a1_addr, <16 x i16> %a2) nounw
 }
 
 define <16 x i16> @vec256_i16_signed_reg_mem(<16 x i16> %a1, ptr %a2_addr) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i16_signed_reg_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm5
-; AVX1-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i16_signed_reg_mem:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
+; AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
+; AVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i16_signed_reg_mem:
 ; AVX2:       # %bb.0:
@@ -2012,51 +1662,28 @@ define <16 x i16> @vec256_i16_signed_reg_mem(<16 x i16> %a1, ptr %a2_addr) nounw
 ; AVX2-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i16_signed_reg_mem:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtw %xmm2, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm5
-; XOP-FALLBACK-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
-; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i16_signed_reg_mem:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOPAVX1-NEXT:    vpcomgtw %xmm2, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm5
-; XOPAVX1-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
-; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
-; XOPAVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
-; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
-; XOPAVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; XOPAVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i16_signed_reg_mem:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpcomgtw %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtw %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpminsw %xmm2, %xmm3, %xmm6
+; XOP-NEXT:    vpmaxsw %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubw %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpminsw %xmm1, %xmm0, %xmm6
+; XOP-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOP-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOP-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOP-NEXT:    vpmacsww %xmm3, %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpmacsww %xmm0, %xmm5, %xmm1, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i16_signed_reg_mem:
 ; AVX512F:       # %bb.0:
@@ -2125,31 +1752,31 @@ define <16 x i16> @vec256_i16_signed_reg_mem(<16 x i16> %a1, ptr %a2_addr) nounw
 }
 
 define <16 x i16> @vec256_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i16_signed_mem_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm0
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm1, %xmm3, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtw %xmm0, %xmm2, %xmm5
-; AVX1-FALLBACK-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i16_signed_mem_mem:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm0
+; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm1
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-NEXT:    vpcmpgtw %xmm1, %xmm3, %xmm4
+; AVX1-NEXT:    vpcmpgtw %xmm0, %xmm2, %xmm5
+; AVX1-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
+; AVX1-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
+; AVX1-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpmullw %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i16_signed_mem_mem:
 ; AVX2:       # %bb.0:
@@ -2165,53 +1792,29 @@ define <16 x i16> @vec256_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwin
 ; AVX2-NEXT:    vpaddw %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i16_signed_mem_mem:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm0
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtw %xmm1, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtw %xmm0, %xmm2, %xmm5
-; XOP-FALLBACK-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
-; XOP-FALLBACK-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; XOP-FALLBACK-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpmacsww %xmm2, %xmm5, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i16_signed_mem_mem:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vmovdqa (%rsi), %xmm0
-; XOPAVX1-NEXT:    vmovdqa 16(%rsi), %xmm1
-; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOPAVX1-NEXT:    vpcomgtw %xmm1, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtw %xmm0, %xmm2, %xmm5
-; XOPAVX1-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
-; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
-; XOPAVX1-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
-; XOPAVX1-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
-; XOPAVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
-; XOPAVX1-NEXT:    vpor %xmm6, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpmacsww %xmm2, %xmm5, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i16_signed_mem_mem:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovdqa (%rsi), %xmm0
+; XOP-NEXT:    vmovdqa 16(%rsi), %xmm1
+; XOP-NEXT:    vmovdqa (%rdi), %xmm2
+; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-NEXT:    vpcomgtw %xmm1, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtw %xmm0, %xmm2, %xmm5
+; XOP-NEXT:    vpminsw %xmm1, %xmm3, %xmm6
+; XOP-NEXT:    vpmaxsw %xmm1, %xmm3, %xmm1
+; XOP-NEXT:    vpsubw %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpminsw %xmm0, %xmm2, %xmm6
+; XOP-NEXT:    vpmaxsw %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vpsubw %xmm6, %xmm0, %xmm0
+; XOP-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; XOP-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1]
+; XOP-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; XOP-NEXT:    vpor %xmm6, %xmm4, %xmm4
+; XOP-NEXT:    vpmacsww %xmm3, %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpmacsww %xmm2, %xmm5, %xmm0, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i16_signed_mem_mem:
 ; AVX512F:       # %bb.0:
@@ -2290,49 +1893,49 @@ define <16 x i16> @vec256_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwin
 ; Values come from regs
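 ; Editorial gloss (inferred from the CHECK lines below, not part of the
 ; original test file): the midpoint lowering being verified computes, per lane,
 ;   d    = smax(a1, a2) - smin(a1, a2)   ; |a1 - a2|, no overflow
 ;   sign = (a1 > a2) ? -1 : 1            ; vpcmpgt result OR'd with 1
 ;   mid  = a1 + ((d >> 1) * sign)        ; rounds towards a1
 ; i.e. (a1 + a2) / 2 evaluated without widening the element type.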
 
 define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i8_signed_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm3, %xmm2, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm5
-; AVX1-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpminsb %xmm3, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm3, %xmm2, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm6, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
-; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm8, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpand %xmm1, %xmm8, %xmm1
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm6, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm6, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpand %xmm5, %xmm8, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpand %xmm3, %xmm8, %xmm3
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm5, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i8_signed_reg_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpcmpgtb %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm6
+; AVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpminsb %xmm3, %xmm2, %xmm6
+; AVX1-NEXT:    vpmaxsb %xmm3, %xmm2, %xmm3
+; AVX1-NEXT:    vpsubb %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm6, %xmm8, %xmm6
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm1, %xmm8, %xmm1
+; AVX1-NEXT:    vpackuswb %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpor %xmm7, %xmm4, %xmm4
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpand %xmm5, %xmm8, %xmm5
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX1-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpand %xmm3, %xmm8, %xmm3
+; AVX1-NEXT:    vpackuswb %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i8_signed_reg_reg:
 ; AVX2:       # %bb.0:
@@ -2356,81 +1959,43 @@ define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwin
 ; AVX2-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i8_signed_reg_reg:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm5
-; XOP-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
-; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; XOP-FALLBACK-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
-; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i8_signed_reg_reg:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOPAVX1-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm5
-; XOPAVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm6
-; XOPAVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
-; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
-; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; XOPAVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; XOPAVX1-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
-; XOPAVX1-NEXT:    vpperm %xmm5, %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpor %xmm7, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpperm %xmm5, %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i8_signed_reg_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpminsb %xmm1, %xmm0, %xmm6
+; XOP-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
+; XOP-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOP-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOP-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOP-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpor %xmm7, %xmm4, %xmm4
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; XOP-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; XOP-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i8_signed_reg_reg:
 ; AVX512F:       # %bb.0:
@@ -2510,52 +2075,52 @@ define <32 x i8> @vec256_i8_signed_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwin
 }
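
 ; Editorial note, not from the original test file: the unsigned variant below
 ; shows AVX1 synthesizing the unsigned byte compare it lacks,
 ;   gtu(a, b) == NOT(a == umin(a, b))    ; vpminub + vpcmpeqb + vpxor(-1)
 ; while XOP uses vpcomgtub directly, making its block one compare shorter.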
 
 define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i8_unsigned_reg_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm3
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminub %xmm3, %xmm2, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpeqb %xmm4, %xmm2, %xmm5
-; AVX1-FALLBACK-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
-; AVX1-FALLBACK-NEXT:    vpxor %xmm6, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpminub %xmm1, %xmm0, %xmm7
-; AVX1-FALLBACK-NEXT:    vpcmpeqb %xmm7, %xmm0, %xmm8
-; AVX1-FALLBACK-NEXT:    vpxor %xmm6, %xmm8, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm7, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpmaxub %xmm3, %xmm2, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm6, %xmm6
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm8, %xmm4
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
-; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm8, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm6, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpand %xmm1, %xmm8, %xmm1
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm4, %xmm1, %xmm1
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm6, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpand %xmm4, %xmm8, %xmm4
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpand %xmm3, %xmm8, %xmm3
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i8_unsigned_reg_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpminub %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vpcmpeqb %xmm4, %xmm2, %xmm5
+; AVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; AVX1-NEXT:    vpxor %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm7
+; AVX1-NEXT:    vpcmpeqb %xmm7, %xmm0, %xmm8
+; AVX1-NEXT:    vpxor %xmm6, %xmm8, %xmm6
+; AVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsubb %xmm7, %xmm1, %xmm1
+; AVX1-NEXT:    vpmaxub %xmm3, %xmm2, %xmm3
+; AVX1-NEXT:    vpsubb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT:    vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpor %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm4, %xmm8, %xmm4
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm4, %xmm8, %xmm4
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; AVX1-NEXT:    vpmullw %xmm6, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm1, %xmm8, %xmm1
+; AVX1-NEXT:    vpackuswb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm4 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm6, %xmm4, %xmm4
+; AVX1-NEXT:    vpand %xmm4, %xmm8, %xmm4
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-NEXT:    vpmullw %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpand %xmm3, %xmm8, %xmm3
+; AVX1-NEXT:    vpackuswb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i8_unsigned_reg_reg:
 ; AVX2:       # %bb.0:
@@ -2581,81 +2146,43 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw
 ; AVX2-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i8_unsigned_reg_reg:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtub %xmm2, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtub %xmm1, %xmm0, %xmm5
-; XOP-FALLBACK-NEXT:    vpminub %xmm1, %xmm0, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpminub %xmm2, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxub %xmm2, %xmm3, %xmm2
-; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; XOP-FALLBACK-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
-; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i8_unsigned_reg_reg:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOPAVX1-NEXT:    vpcomgtub %xmm2, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtub %xmm1, %xmm0, %xmm5
-; XOPAVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm6
-; XOPAVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
-; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpminub %xmm2, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpmaxub %xmm2, %xmm3, %xmm2
-; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; XOPAVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; XOPAVX1-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
-; XOPAVX1-NEXT:    vpperm %xmm5, %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpor %xmm7, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpperm %xmm5, %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i8_unsigned_reg_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpcomgtub %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtub %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpminub %xmm1, %xmm0, %xmm6
+; XOP-NEXT:    vpmaxub %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpminub %xmm2, %xmm3, %xmm6
+; XOP-NEXT:    vpmaxub %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOP-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOP-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOP-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpor %xmm7, %xmm4, %xmm4
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; XOP-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; XOP-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i8_unsigned_reg_reg:
 ; AVX512F:       # %bb.0:
@@ -2739,50 +2266,50 @@ define <32 x i8> @vec256_i8_unsigned_reg_reg(<32 x i8> %a1, <32 x i8> %a2) nounw
 ; Values are loaded. Only check signed case.
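 ; Editorial gloss on the i8 blocks below (not part of the original test file):
 ; x86 has no byte-granularity shift, so the AVX path emulates the "lshr i8 x, 1"
 ; step with a word shift plus mask (vpsrlw $1 + vpand 127), while XOP shifts
 ; bytes directly via vpshlb with a count of -1 (negative counts shift right).
 ; The i8 multiply is likewise done in 16-bit halves: vpunpckhbw/vpmovzxbw
 ; widen, vpmullw multiplies, and vpackuswb (vpperm on XOP) narrows back.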
 
 define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i8_signed_mem_reg:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
-; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm3, %xmm2, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm5
-; AVX1-FALLBACK-NEXT:    vpminsb %xmm0, %xmm1, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm6, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpminsb %xmm3, %xmm2, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm3, %xmm2, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm6, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
-; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm8, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpand %xmm0, %xmm8, %xmm0
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm6, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm6, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpand %xmm5, %xmm8, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpand %xmm3, %xmm8, %xmm3
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm5, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i8_signed_mem_reg:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm1
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
+; AVX1-NEXT:    vpcmpgtb %xmm3, %xmm2, %xmm4
+; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm5
+; AVX1-NEXT:    vpminsb %xmm0, %xmm1, %xmm6
+; AVX1-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vpsubb %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vpminsb %xmm3, %xmm2, %xmm6
+; AVX1-NEXT:    vpmaxsb %xmm3, %xmm2, %xmm3
+; AVX1-NEXT:    vpsubb %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm6, %xmm8, %xmm6
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm0, %xmm8, %xmm0
+; AVX1-NEXT:    vpackuswb %xmm6, %xmm0, %xmm0
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpor %xmm7, %xmm4, %xmm4
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpand %xmm5, %xmm8, %xmm5
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX1-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpand %xmm3, %xmm8, %xmm3
+; AVX1-NEXT:    vpackuswb %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i8_signed_mem_reg:
 ; AVX2:       # %bb.0:
@@ -2807,83 +2334,44 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind
 ; AVX2-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i8_signed_mem_reg:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtb %xmm0, %xmm1, %xmm5
-; XOP-FALLBACK-NEXT:    vpminsb %xmm0, %xmm1, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
-; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
-; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; XOP-FALLBACK-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
-; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm6, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i8_signed_mem_reg:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOPAVX1-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtb %xmm0, %xmm1, %xmm5
-; XOPAVX1-NEXT:    vpminsb %xmm0, %xmm1, %xmm6
-; XOPAVX1-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
-; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
-; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; XOPAVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; XOPAVX1-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
-; XOPAVX1-NEXT:    vpperm %xmm5, %xmm6, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpor %xmm7, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpperm %xmm5, %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i8_signed_mem_reg:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; XOP-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtb %xmm0, %xmm1, %xmm5
+; XOP-NEXT:    vpminsb %xmm0, %xmm1, %xmm6
+; XOP-NEXT:    vpmaxsb %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    vpsubb %xmm6, %xmm0, %xmm0
+; XOP-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
+; XOP-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOP-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpshlb %xmm6, %xmm0, %xmm0
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOP-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOP-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm0, %xmm0
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpor %xmm7, %xmm4, %xmm4
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; XOP-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; XOP-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i8_signed_mem_reg:
 ; AVX512F:       # %bb.0:
@@ -2967,50 +2455,50 @@ define <32 x i8> @vec256_i8_signed_mem_reg(ptr %a1_addr, <32 x i8> %a2) nounwind
 }
 
 define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i8_signed_reg_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; AVX1-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm3, %xmm1, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm2, %xmm0, %xmm5
-; AVX1-FALLBACK-NEXT:    vpminsb %xmm2, %xmm0, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm2, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsb %xmm3, %xmm1, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm3, %xmm1, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm6, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
-; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm8, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpand %xmm2, %xmm8, %xmm2
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm6, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm6, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpand %xmm5, %xmm8, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpand %xmm3, %xmm8, %xmm3
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm5, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm1, %xmm3, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i8_signed_reg_mem:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm2
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpcmpgtb %xmm3, %xmm1, %xmm4
+; AVX1-NEXT:    vpcmpgtb %xmm2, %xmm0, %xmm5
+; AVX1-NEXT:    vpminsb %xmm2, %xmm0, %xmm6
+; AVX1-NEXT:    vpmaxsb %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpminsb %xmm3, %xmm1, %xmm6
+; AVX1-NEXT:    vpmaxsb %xmm3, %xmm1, %xmm3
+; AVX1-NEXT:    vpsubb %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT:    vpand %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm6, %xmm8, %xmm6
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-NEXT:    vpmullw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vpand %xmm2, %xmm8, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpor %xmm7, %xmm4, %xmm4
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpand %xmm5, %xmm8, %xmm5
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX1-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpand %xmm3, %xmm8, %xmm3
+; AVX1-NEXT:    vpackuswb %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i8_signed_reg_mem:
 ; AVX2:       # %bb.0:
@@ -3035,83 +2523,44 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind
 ; AVX2-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i8_signed_reg_mem:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm2
-; XOP-FALLBACK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm5
-; XOP-FALLBACK-NEXT:    vpminsb %xmm1, %xmm0, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
-; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; XOP-FALLBACK-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
-; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm6, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; XOP-FALLBACK-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i8_signed_reg_mem:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm1
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm2
-; XOPAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; XOPAVX1-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm5
-; XOPAVX1-NEXT:    vpminsb %xmm1, %xmm0, %xmm6
-; XOPAVX1-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
-; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
-; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; XOPAVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; XOPAVX1-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
-; XOPAVX1-NEXT:    vpperm %xmm5, %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpor %xmm7, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpperm %xmm5, %xmm6, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; XOPAVX1-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i8_signed_reg_mem:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovdqa (%rdi), %xmm1
+; XOP-NEXT:    vmovdqa 16(%rdi), %xmm2
+; XOP-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; XOP-NEXT:    vpcomgtb %xmm2, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtb %xmm1, %xmm0, %xmm5
+; XOP-NEXT:    vpminsb %xmm1, %xmm0, %xmm6
+; XOP-NEXT:    vpmaxsb %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpminsb %xmm2, %xmm3, %xmm6
+; XOP-NEXT:    vpmaxsb %xmm2, %xmm3, %xmm2
+; XOP-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOP-NEXT:    vpshlb %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOP-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOP-NEXT:    vpmullw %xmm5, %xmm1, %xmm1
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpor %xmm7, %xmm4, %xmm4
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; XOP-NEXT:    vpmullw %xmm4, %xmm2, %xmm2
+; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm2, %xmm2
+; XOP-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
+; XOP-NEXT:    vpaddb %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i8_signed_reg_mem:
 ; AVX512F:       # %bb.0:
@@ -3195,51 +2644,51 @@ define <32 x i8> @vec256_i8_signed_reg_mem(<32 x i8> %a1, ptr %a2_addr) nounwind
 }
 
 define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
-; AVX1-FALLBACK-LABEL: vec256_i8_signed_mem_mem:
-; AVX1-FALLBACK:       # %bb.0:
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm2
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm3
-; AVX1-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm0
-; AVX1-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm1
-; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm3, %xmm1, %xmm4
-; AVX1-FALLBACK-NEXT:    vpcmpgtb %xmm2, %xmm0, %xmm5
-; AVX1-FALLBACK-NEXT:    vpminsb %xmm2, %xmm0, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm2, %xmm0, %xmm2
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpminsb %xmm3, %xmm1, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmaxsb %xmm3, %xmm1, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsubb %xmm6, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpsrlw $1, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
-; AVX1-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
-; AVX1-FALLBACK-NEXT:    vpand %xmm6, %xmm8, %xmm6
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm5, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpand %xmm2, %xmm8, %xmm2
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm6, %xmm2, %xmm2
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpor %xmm7, %xmm4, %xmm4
-; AVX1-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm6, %xmm5, %xmm5
-; AVX1-FALLBACK-NEXT:    vpand %xmm5, %xmm8, %xmm5
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
-; AVX1-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; AVX1-FALLBACK-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpand %xmm3, %xmm8, %xmm3
-; AVX1-FALLBACK-NEXT:    vpackuswb %xmm5, %xmm3, %xmm3
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm1, %xmm3, %xmm1
-; AVX1-FALLBACK-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
-; AVX1-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-FALLBACK-NEXT:    retq
+; AVX1-LABEL: vec256_i8_signed_mem_mem:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vmovdqa (%rsi), %xmm2
+; AVX1-NEXT:    vmovdqa 16(%rsi), %xmm3
+; AVX1-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX1-NEXT:    vmovdqa 16(%rdi), %xmm1
+; AVX1-NEXT:    vpcmpgtb %xmm3, %xmm1, %xmm4
+; AVX1-NEXT:    vpcmpgtb %xmm2, %xmm0, %xmm5
+; AVX1-NEXT:    vpminsb %xmm2, %xmm0, %xmm6
+; AVX1-NEXT:    vpmaxsb %xmm2, %xmm0, %xmm2
+; AVX1-NEXT:    vpsubb %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpminsb %xmm3, %xmm1, %xmm6
+; AVX1-NEXT:    vpmaxsb %xmm3, %xmm1, %xmm3
+; AVX1-NEXT:    vpsubb %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT:    vpand %xmm6, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT:    vpand %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpand %xmm6, %xmm8, %xmm6
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; AVX1-NEXT:    vpmullw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vpand %xmm2, %xmm8, %xmm2
+; AVX1-NEXT:    vpackuswb %xmm6, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpor %xmm7, %xmm4, %xmm4
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT:    vpmullw %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpand %xmm5, %xmm8, %xmm5
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX1-NEXT:    vpmullw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpand %xmm3, %xmm8, %xmm3
+; AVX1-NEXT:    vpackuswb %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpaddb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpaddb %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: vec256_i8_signed_mem_mem:
 ; AVX2:       # %bb.0:
@@ -3265,85 +2714,45 @@ define <32 x i8> @vec256_i8_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind
 ; AVX2-NEXT:    vpaddb %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
-; XOP-FALLBACK-LABEL: vec256_i8_signed_mem_mem:
-; XOP-FALLBACK:       # %bb.0:
-; XOP-FALLBACK-NEXT:    vmovdqa (%rsi), %xmm0
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rsi), %xmm1
-; XOP-FALLBACK-NEXT:    vmovdqa (%rdi), %xmm2
-; XOP-FALLBACK-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOP-FALLBACK-NEXT:    vpcomgtb %xmm1, %xmm3, %xmm4
-; XOP-FALLBACK-NEXT:    vpcomgtb %xmm0, %xmm2, %xmm5
-; XOP-FALLBACK-NEXT:    vpminsb %xmm0, %xmm2, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsb %xmm0, %xmm2, %xmm0
-; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpminsb %xmm1, %xmm3, %xmm6
-; XOP-FALLBACK-NEXT:    vpmaxsb %xmm1, %xmm3, %xmm1
-; XOP-FALLBACK-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpshlb %xmm6, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; XOP-FALLBACK-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
-; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm6, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpor %xmm7, %xmm4, %xmm4
-; XOP-FALLBACK-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOP-FALLBACK-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; XOP-FALLBACK-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; XOP-FALLBACK-NEXT:    vpmullw %xmm4, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpperm %xmm5, %xmm6, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; XOP-FALLBACK-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
-; XOP-FALLBACK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOP-FALLBACK-NEXT:    retq
-;
-; XOPAVX1-LABEL: vec256_i8_signed_mem_mem:
-; XOPAVX1:       # %bb.0:
-; XOPAVX1-NEXT:    vmovdqa (%rsi), %xmm0
-; XOPAVX1-NEXT:    vmovdqa 16(%rsi), %xmm1
-; XOPAVX1-NEXT:    vmovdqa (%rdi), %xmm2
-; XOPAVX1-NEXT:    vmovdqa 16(%rdi), %xmm3
-; XOPAVX1-NEXT:    vpcomgtb %xmm1, %xmm3, %xmm4
-; XOPAVX1-NEXT:    vpcomgtb %xmm0, %xmm2, %xmm5
-; XOPAVX1-NEXT:    vpminsb %xmm0, %xmm2, %xmm6
-; XOPAVX1-NEXT:    vpmaxsb %xmm0, %xmm2, %xmm0
-; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpminsb %xmm1, %xmm3, %xmm6
-; XOPAVX1-NEXT:    vpmaxsb %xmm1, %xmm3, %xmm1
-; XOPAVX1-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpshlb %xmm6, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; XOPAVX1-NEXT:    vpor %xmm7, %xmm5, %xmm5
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
-; XOPAVX1-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
-; XOPAVX1-NEXT:    vpperm %xmm5, %xmm6, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpor %xmm7, %xmm4, %xmm4
-; XOPAVX1-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; XOPAVX1-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; XOPAVX1-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
-; XOPAVX1-NEXT:    vpmullw %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpperm %xmm5, %xmm6, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; XOPAVX1-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
-; XOPAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; XOPAVX1-NEXT:    retq
+; XOP-LABEL: vec256_i8_signed_mem_mem:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vmovdqa (%rsi), %xmm0
+; XOP-NEXT:    vmovdqa 16(%rsi), %xmm1
+; XOP-NEXT:    vmovdqa (%rdi), %xmm2
+; XOP-NEXT:    vmovdqa 16(%rdi), %xmm3
+; XOP-NEXT:    vpcomgtb %xmm1, %xmm3, %xmm4
+; XOP-NEXT:    vpcomgtb %xmm0, %xmm2, %xmm5
+; XOP-NEXT:    vpminsb %xmm0, %xmm2, %xmm6
+; XOP-NEXT:    vpmaxsb %xmm0, %xmm2, %xmm0
+; XOP-NEXT:    vpsubb %xmm6, %xmm0, %xmm0
+; XOP-NEXT:    vpminsb %xmm1, %xmm3, %xmm6
+; XOP-NEXT:    vpmaxsb %xmm1, %xmm3, %xmm1
+; XOP-NEXT:    vpsubb %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpcmpeqd %xmm6, %xmm6, %xmm6
+; XOP-NEXT:    vpshlb %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpshlb %xmm6, %xmm0, %xmm0
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; XOP-NEXT:    vpor %xmm7, %xmm5, %xmm5
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpmullw %xmm6, %xmm8, %xmm6
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero,xmm5[4],zero,xmm5[5],zero,xmm5[6],zero,xmm5[7],zero
+; XOP-NEXT:    vpmullw %xmm5, %xmm0, %xmm0
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30]
+; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm0, %xmm0
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpor %xmm7, %xmm4, %xmm4
+; XOP-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; XOP-NEXT:    vpmullw %xmm7, %xmm6, %xmm6
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; XOP-NEXT:    vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; XOP-NEXT:    vpmullw %xmm4, %xmm1, %xmm1
+; XOP-NEXT:    vpperm %xmm5, %xmm6, %xmm1, %xmm1
+; XOP-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
+; XOP-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
+; XOP-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOP-NEXT:    retq
 ;
 ; AVX512F-LABEL: vec256_i8_signed_mem_mem:
 ; AVX512F:       # %bb.0:

More information about the llvm-commits mailing list