[llvm] [X86] Add 256-bit and 512-bit CLMULR and CLMULH test coverage (PR #177561)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 23 02:13:12 PST 2026


https://github.com/RKSimon created https://github.com/llvm/llvm-project/pull/177561

None

From 4ae1f12feb7dc38e822eef35eb639432fba9adbb Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Fri, 23 Jan 2026 10:12:35 +0000
Subject: [PATCH] [X86] Add 256-bit and 512-bit CLMULR and CLMULH test coverage

---
 llvm/test/CodeGen/X86/clmul-vector-256.ll | 1844 +++++++++++++++++++++
 llvm/test/CodeGen/X86/clmul-vector-512.ll | 1598 +++++++++++++++++-
 2 files changed, 3439 insertions(+), 3 deletions(-)
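For reference, the new tests exercise the generic @llvm.clmul intrinsic through a widen/shift/truncate idiom rather than dedicated clmulr/clmulh intrinsics. A minimal scalar sketch of the clmulr pattern, inferred from the vector test bodies below: for an N-bit type, zero-extend to 2N bits, carry-less multiply, then shift right by N-1 to take bits [N-1, 2N-2] of the full product. (The clmulh tests presumably use the same idiom with a shift by N instead; that variant is an assumption from the test names.)

; Hypothetical scalar form of the clmulr pattern tested below for i8:
; widen, carry-less multiply, take bits [7, 14] of the 15-bit product.
define i8 @clmulr_i8_sketch(i8 %a, i8 %b) {
  %a.ext = zext i8 %a to i16
  %b.ext = zext i8 %b to i16
  %clmul = call i16 @llvm.clmul.i16(i16 %a.ext, i16 %b.ext)
  %res.ext = lshr i16 %clmul, 7
  %res = trunc i16 %res.ext to i8
  ret i8 %res
}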

diff --git a/llvm/test/CodeGen/X86/clmul-vector-256.ll b/llvm/test/CodeGen/X86/clmul-vector-256.ll
index b020704b63a5b..303aeeae3f5df 100644
--- a/llvm/test/CodeGen/X86/clmul-vector-256.ll
+++ b/llvm/test/CodeGen/X86/clmul-vector-256.ll
@@ -608,3 +608,1847 @@ define <4 x i64> @clmul_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
   %res = call <4 x i64> @llvm.clmul.v4i64(<4 x i64> %a, <4 x i64> %b)
   ret <4 x i64> %res
 }
+
+define <32 x i8> @clmulr_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
+; AVX1-LABEL: clmulr_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX1-NEXT:    vpshufb %xmm5, %xmm3, %xmm5
+; AVX1-NEXT:    vpsrlw $4, %xmm4, %xmm4
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX1-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm6
+; AVX1-NEXT:    vpshufb %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm1, %ymm5
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm9
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm6
+; AVX1-NEXT:    vpshufb %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vpmullw %xmm1, %xmm9, %xmm6
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm7
+; AVX1-NEXT:    vpshufb %xmm7, %xmm3, %xmm7
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm7, %xmm0
+; AVX1-NEXT:    vpmullw %xmm0, %xmm8, %xmm7
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
+; AVX1-NEXT:    vbroadcastf128 {{.*#+}} ymm7 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX1-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX1-NEXT:    vandps %ymm7, %ymm6, %ymm10
+; AVX1-NEXT:    vbroadcastss {{.*#+}} ymm6 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX1-NEXT:    vpandn %xmm8, %xmm6, %xmm8
+; AVX1-NEXT:    vpmaddubsw %xmm8, %xmm0, %xmm8
+; AVX1-NEXT:    vpsllw $8, %xmm8, %xmm8
+; AVX1-NEXT:    vpandn %xmm9, %xmm6, %xmm9
+; AVX1-NEXT:    vpmaddubsw %xmm9, %xmm1, %xmm9
+; AVX1-NEXT:    vpsllw $8, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm8
+; AVX1-NEXT:    vorps %ymm8, %ymm10, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm1, %xmm10, %xmm11
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm12
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm12, %ymm11
+; AVX1-NEXT:    vandps %ymm7, %ymm11, %ymm11
+; AVX1-NEXT:    vpandn %xmm9, %xmm6, %xmm9
+; AVX1-NEXT:    vpmaddubsw %xmm9, %xmm0, %xmm9
+; AVX1-NEXT:    vpsllw $8, %xmm9, %xmm9
+; AVX1-NEXT:    vpandn %xmm10, %xmm6, %xmm10
+; AVX1-NEXT:    vpmaddubsw %xmm10, %xmm1, %xmm10
+; AVX1-NEXT:    vpsllw $8, %xmm10, %xmm10
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vorps %ymm9, %ymm11, %ymm9
+; AVX1-NEXT:    vxorps %ymm8, %ymm9, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm1, %xmm10, %xmm11
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm12
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm12, %ymm11
+; AVX1-NEXT:    vandps %ymm7, %ymm11, %ymm11
+; AVX1-NEXT:    vpandn %xmm9, %xmm6, %xmm9
+; AVX1-NEXT:    vpmaddubsw %xmm9, %xmm0, %xmm9
+; AVX1-NEXT:    vpsllw $8, %xmm9, %xmm9
+; AVX1-NEXT:    vpandn %xmm10, %xmm6, %xmm10
+; AVX1-NEXT:    vpmaddubsw %xmm10, %xmm1, %xmm10
+; AVX1-NEXT:    vpsllw $8, %xmm10, %xmm10
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vorps %ymm9, %ymm11, %ymm9
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm10
+; AVX1-NEXT:    vextractf128 $1, %ymm10, %xmm11
+; AVX1-NEXT:    vpmullw %xmm1, %xmm11, %xmm12
+; AVX1-NEXT:    vpmullw %xmm0, %xmm10, %xmm13
+; AVX1-NEXT:    vinsertf128 $1, %xmm12, %ymm13, %ymm12
+; AVX1-NEXT:    vandps %ymm7, %ymm12, %ymm12
+; AVX1-NEXT:    vpandn %xmm10, %xmm6, %xmm10
+; AVX1-NEXT:    vpmaddubsw %xmm10, %xmm0, %xmm10
+; AVX1-NEXT:    vpsllw $8, %xmm10, %xmm10
+; AVX1-NEXT:    vpandn %xmm11, %xmm6, %xmm11
+; AVX1-NEXT:    vpmaddubsw %xmm11, %xmm1, %xmm11
+; AVX1-NEXT:    vpsllw $8, %xmm11, %xmm11
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm10, %ymm10
+; AVX1-NEXT:    vorps %ymm10, %ymm12, %ymm10
+; AVX1-NEXT:    vxorps %ymm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm1, %xmm10, %xmm11
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm12
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm12, %ymm11
+; AVX1-NEXT:    vandps %ymm7, %ymm11, %ymm11
+; AVX1-NEXT:    vpandn %xmm9, %xmm6, %xmm9
+; AVX1-NEXT:    vpmaddubsw %xmm9, %xmm0, %xmm9
+; AVX1-NEXT:    vpsllw $8, %xmm9, %xmm9
+; AVX1-NEXT:    vpandn %xmm10, %xmm6, %xmm10
+; AVX1-NEXT:    vpmaddubsw %xmm10, %xmm1, %xmm10
+; AVX1-NEXT:    vpsllw $8, %xmm10, %xmm10
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vorps %ymm9, %ymm11, %ymm9
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm10
+; AVX1-NEXT:    vextractf128 $1, %ymm10, %xmm11
+; AVX1-NEXT:    vpmullw %xmm1, %xmm11, %xmm12
+; AVX1-NEXT:    vpmullw %xmm0, %xmm10, %xmm13
+; AVX1-NEXT:    vinsertf128 $1, %xmm12, %ymm13, %ymm12
+; AVX1-NEXT:    vandps %ymm7, %ymm12, %ymm12
+; AVX1-NEXT:    vpandn %xmm10, %xmm6, %xmm10
+; AVX1-NEXT:    vpmaddubsw %xmm10, %xmm0, %xmm10
+; AVX1-NEXT:    vpsllw $8, %xmm10, %xmm10
+; AVX1-NEXT:    vpandn %xmm11, %xmm6, %xmm11
+; AVX1-NEXT:    vpmaddubsw %xmm11, %xmm1, %xmm11
+; AVX1-NEXT:    vpsllw $8, %xmm11, %xmm11
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm10, %ymm10
+; AVX1-NEXT:    vorps %ymm10, %ymm12, %ymm10
+; AVX1-NEXT:    vxorps %ymm10, %ymm9, %ymm9
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm10
+; AVX1-NEXT:    vextractf128 $1, %ymm10, %xmm11
+; AVX1-NEXT:    vpmullw %xmm1, %xmm11, %xmm12
+; AVX1-NEXT:    vpmullw %xmm0, %xmm10, %xmm13
+; AVX1-NEXT:    vinsertf128 $1, %xmm12, %ymm13, %ymm12
+; AVX1-NEXT:    vandps %ymm7, %ymm12, %ymm7
+; AVX1-NEXT:    vpandn %xmm10, %xmm6, %xmm10
+; AVX1-NEXT:    vpmaddubsw %xmm10, %xmm0, %xmm10
+; AVX1-NEXT:    vpsllw $8, %xmm10, %xmm10
+; AVX1-NEXT:    vpandn %xmm11, %xmm6, %xmm11
+; AVX1-NEXT:    vpmaddubsw %xmm11, %xmm1, %xmm11
+; AVX1-NEXT:    vpsllw $8, %xmm11, %xmm11
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm10, %ymm10
+; AVX1-NEXT:    vorps %ymm7, %ymm10, %ymm7
+; AVX1-NEXT:    vxorps %ymm7, %ymm9, %ymm7
+; AVX1-NEXT:    vxorps %ymm7, %ymm8, %ymm7
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm8
+; AVX1-NEXT:    vpmullw %xmm1, %xmm8, %xmm9
+; AVX1-NEXT:    vpmullw %xmm5, %xmm0, %xmm10
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm10, %ymm9
+; AVX1-NEXT:    vandps %ymm6, %ymm9, %ymm9
+; AVX1-NEXT:    vpandn %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpmaddubsw %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vpsllw $8, %xmm0, %xmm0
+; AVX1-NEXT:    vpandn %xmm8, %xmm6, %xmm5
+; AVX1-NEXT:    vpmaddubsw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsllw $8, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm0, %ymm9, %ymm0
+; AVX1-NEXT:    vxorps %ymm0, %ymm7, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm5
+; AVX1-NEXT:    vpshufb %xmm5, %xmm3, %xmm5
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm5
+; AVX1-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: clmulr_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastb {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm4
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX2-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX2-NEXT:    vpshufb %ymm4, %ymm3, %ymm4
+; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX2-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX2-NEXT:    vpshufb %ymm5, %ymm0, %ymm5
+; AVX2-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX2-NEXT:    vpsrlw $4, %ymm1, %ymm5
+; AVX2-NEXT:    vpand %ymm2, %ymm5, %ymm5
+; AVX2-NEXT:    vpshufb %ymm5, %ymm0, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm7
+; AVX2-NEXT:    vpmullw %ymm7, %ymm4, %ymm8
+; AVX2-NEXT:    vpbroadcastw {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpand %ymm5, %ymm8, %ymm8
+; AVX2-NEXT:    vpandn %ymm7, %ymm5, %ymm7
+; AVX2-NEXT:    vpmaddubsw %ymm7, %ymm4, %ymm7
+; AVX2-NEXT:    vpsllw $8, %ymm7, %ymm7
+; AVX2-NEXT:    vpor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm9
+; AVX2-NEXT:    vpand %ymm5, %ymm9, %ymm9
+; AVX2-NEXT:    vpandn %ymm8, %ymm5, %ymm8
+; AVX2-NEXT:    vpmaddubsw %ymm8, %ymm4, %ymm8
+; AVX2-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX2-NEXT:    vpor %ymm8, %ymm9, %ymm8
+; AVX2-NEXT:    vpxor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm9
+; AVX2-NEXT:    vpand %ymm5, %ymm9, %ymm9
+; AVX2-NEXT:    vpandn %ymm8, %ymm5, %ymm8
+; AVX2-NEXT:    vpmaddubsw %ymm8, %ymm4, %ymm8
+; AVX2-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX2-NEXT:    vpor %ymm8, %ymm9, %ymm8
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6
+; AVX2-NEXT:    vpmullw %ymm6, %ymm4, %ymm9
+; AVX2-NEXT:    vpand %ymm5, %ymm9, %ymm9
+; AVX2-NEXT:    vpandn %ymm6, %ymm5, %ymm6
+; AVX2-NEXT:    vpmaddubsw %ymm6, %ymm4, %ymm6
+; AVX2-NEXT:    vpsllw $8, %ymm6, %ymm6
+; AVX2-NEXT:    vpor %ymm6, %ymm9, %ymm6
+; AVX2-NEXT:    vpxor %ymm6, %ymm8, %ymm6
+; AVX2-NEXT:    vpxor %ymm6, %ymm7, %ymm6
+; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm7
+; AVX2-NEXT:    vpmullw %ymm7, %ymm4, %ymm8
+; AVX2-NEXT:    vpand %ymm5, %ymm8, %ymm8
+; AVX2-NEXT:    vpandn %ymm7, %ymm5, %ymm7
+; AVX2-NEXT:    vpmaddubsw %ymm7, %ymm4, %ymm7
+; AVX2-NEXT:    vpsllw $8, %ymm7, %ymm7
+; AVX2-NEXT:    vpor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm9
+; AVX2-NEXT:    vpand %ymm5, %ymm9, %ymm9
+; AVX2-NEXT:    vpandn %ymm8, %ymm5, %ymm8
+; AVX2-NEXT:    vpmaddubsw %ymm8, %ymm4, %ymm8
+; AVX2-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX2-NEXT:    vpor %ymm8, %ymm9, %ymm8
+; AVX2-NEXT:    vpxor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm9
+; AVX2-NEXT:    vpand %ymm5, %ymm9, %ymm9
+; AVX2-NEXT:    vpandn %ymm8, %ymm5, %ymm8
+; AVX2-NEXT:    vpmaddubsw %ymm8, %ymm4, %ymm8
+; AVX2-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX2-NEXT:    vpor %ymm8, %ymm9, %ymm8
+; AVX2-NEXT:    vpxor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpxor %ymm7, %ymm6, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpmullw %ymm1, %ymm4, %ymm7
+; AVX2-NEXT:    vpand %ymm5, %ymm7, %ymm7
+; AVX2-NEXT:    vpandn %ymm1, %ymm5, %ymm1
+; AVX2-NEXT:    vpmaddubsw %ymm1, %ymm4, %ymm1
+; AVX2-NEXT:    vpsllw $8, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm7, %ymm1
+; AVX2-NEXT:    vpxor %ymm1, %ymm6, %ymm1
+; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm4
+; AVX2-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: clmulr_v32i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm2
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm3
+; AVX512-NEXT:    vextracti64x4 $1, %zmm3, %ymm4
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512-NEXT:    vpmullw %ymm4, %ymm1, %ymm4
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512-NEXT:    vpmullw %ymm3, %ymm0, %ymm3
+; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm4
+; AVX512-NEXT:    vextracti64x4 $1, %zmm4, %ymm5
+; AVX512-NEXT:    vpmullw %ymm5, %ymm1, %ymm5
+; AVX512-NEXT:    vpmullw %ymm4, %ymm0, %ymm4
+; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512-NEXT:    vpxorq %zmm3, %zmm4, %zmm3
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm4
+; AVX512-NEXT:    vextracti64x4 $1, %zmm4, %ymm5
+; AVX512-NEXT:    vpmullw %ymm5, %ymm1, %ymm5
+; AVX512-NEXT:    vpmullw %ymm4, %ymm0, %ymm4
+; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm5
+; AVX512-NEXT:    vextracti64x4 $1, %zmm5, %ymm6
+; AVX512-NEXT:    vpmullw %ymm6, %ymm1, %ymm6
+; AVX512-NEXT:    vpmullw %ymm5, %ymm0, %ymm5
+; AVX512-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512-NEXT:    vpternlogq {{.*#+}} zmm5 = zmm5 ^ zmm3 ^ zmm4
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm3
+; AVX512-NEXT:    vextracti64x4 $1, %zmm3, %ymm4
+; AVX512-NEXT:    vpmullw %ymm4, %ymm1, %ymm4
+; AVX512-NEXT:    vpmullw %ymm3, %ymm0, %ymm3
+; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm4
+; AVX512-NEXT:    vextracti64x4 $1, %zmm4, %ymm6
+; AVX512-NEXT:    vpmullw %ymm6, %ymm1, %ymm6
+; AVX512-NEXT:    vpmullw %ymm4, %ymm0, %ymm4
+; AVX512-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
+; AVX512-NEXT:    vpternlogq {{.*#+}} zmm4 = zmm4 ^ zmm5 ^ zmm3
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm3
+; AVX512-NEXT:    vextracti64x4 $1, %zmm3, %ymm5
+; AVX512-NEXT:    vpmullw %ymm5, %ymm1, %ymm5
+; AVX512-NEXT:    vpmullw %ymm3, %ymm0, %ymm3
+; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm3
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
+; AVX512-NEXT:    vextracti64x4 $1, %zmm2, %ymm5
+; AVX512-NEXT:    vpmullw %ymm5, %ymm1, %ymm1
+; AVX512-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm0 ^ zmm4 ^ zmm3
+; AVX512-NEXT:    vpsrlw $7, %ymm0, %ymm1
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT:    vpsrlw $7, %ymm0, %ymm0
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+  %a.ext = zext <32 x i8> %a to <32 x i16>
+  %b.ext = zext <32 x i8> %b to <32 x i16>
+  %clmul = call <32 x i16> @llvm.clmul.v32i16(<32 x i16> %a.ext, <32 x i16> %b.ext)
+  %res.ext = lshr <32 x i16> %clmul, splat (i16 7)
+  %res = trunc <32 x i16> %res.ext to <32 x i8>
+  ret <32 x i8> %res
+}
+
+define <16 x i16> @clmulr_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
+; AVX1-LABEL: clmulr_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; AVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm5
+; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm2, %xmm5, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX1-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vpsrlw $4, %xmm5, %xmm5
+; AVX1-NEXT:    vpand %xmm2, %xmm5, %xmm7
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX1-NEXT:    vpshufb %xmm7, %xmm5, %xmm7
+; AVX1-NEXT:    vpor %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm7
+; AVX1-NEXT:    vpshufb %xmm7, %xmm4, %xmm7
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm7, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm7
+; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpshufb %xmm3, %xmm6, %xmm6
+; AVX1-NEXT:    vpand %xmm2, %xmm6, %xmm9
+; AVX1-NEXT:    vpshufb %xmm9, %xmm4, %xmm9
+; AVX1-NEXT:    vpsrlw $4, %xmm6, %xmm6
+; AVX1-NEXT:    vpand %xmm2, %xmm6, %xmm6
+; AVX1-NEXT:    vpshufb %xmm6, %xmm5, %xmm6
+; AVX1-NEXT:    vpor %xmm6, %xmm9, %xmm6
+; AVX1-NEXT:    vpmullw %xmm6, %xmm8, %xmm8
+; AVX1-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm9
+; AVX1-NEXT:    vpshufb %xmm9, %xmm4, %xmm9
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm9, %xmm0
+; AVX1-NEXT:    vpmullw %xmm7, %xmm0, %xmm7
+; AVX1-NEXT:    vinsertf128 $1, %xmm8, %ymm7, %ymm7
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm9
+; AVX1-NEXT:    vpmullw %xmm6, %xmm9, %xmm9
+; AVX1-NEXT:    vpmullw %xmm0, %xmm8, %xmm8
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm8
+; AVX1-NEXT:    vxorps %ymm7, %ymm8, %ymm7
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm9
+; AVX1-NEXT:    vpmullw %xmm6, %xmm9, %xmm9
+; AVX1-NEXT:    vpmullw %xmm0, %xmm8, %xmm8
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vxorps %ymm7, %ymm8, %ymm7
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm9
+; AVX1-NEXT:    vpmullw %xmm6, %xmm9, %xmm9
+; AVX1-NEXT:    vpmullw %xmm0, %xmm8, %xmm8
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vxorps %ymm7, %ymm8, %ymm7
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm9
+; AVX1-NEXT:    vpmullw %xmm6, %xmm9, %xmm9
+; AVX1-NEXT:    vpmullw %xmm0, %xmm8, %xmm8
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vxorps %ymm7, %ymm8, %ymm7
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm9
+; AVX1-NEXT:    vpmullw %xmm6, %xmm9, %xmm9
+; AVX1-NEXT:    vpmullw %xmm0, %xmm8, %xmm8
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm9
+; AVX1-NEXT:    vpmullw %xmm6, %xmm9, %xmm6
+; AVX1-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; AVX1-NEXT:    vxorps %ymm0, %ymm8, %ymm0
+; AVX1-NEXT:    vxorps %ymm0, %ymm7, %ymm0
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm6
+; AVX1-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: clmulr_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
+; AVX2-NEXT:    vpshufb %ymm5, %ymm0, %ymm3
+; AVX2-NEXT:    vpbroadcastb {{.*#+}} ymm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %ymm0, %ymm3, %ymm4
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX2-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX2-NEXT:    vpshufb %ymm4, %ymm2, %ymm4
+; AVX2-NEXT:    vpsrlw $4, %ymm3, %ymm3
+; AVX2-NEXT:    vpand %ymm0, %ymm3, %ymm6
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX2-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX2-NEXT:    vpshufb %ymm6, %ymm3, %ymm6
+; AVX2-NEXT:    vpor %ymm6, %ymm4, %ymm4
+; AVX2-NEXT:    vpshufb %ymm5, %ymm1, %ymm5
+; AVX2-NEXT:    vpsrlw $4, %ymm5, %ymm1
+; AVX2-NEXT:    vpand %ymm0, %ymm1, %ymm1
+; AVX2-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm6
+; AVX2-NEXT:    vpmullw %ymm6, %ymm4, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm7
+; AVX2-NEXT:    vpmullw %ymm7, %ymm4, %ymm7
+; AVX2-NEXT:    vpxor %ymm6, %ymm7, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm7
+; AVX2-NEXT:    vpmullw %ymm7, %ymm4, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm8
+; AVX2-NEXT:    vpxor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpxor %ymm7, %ymm6, %ymm6
+; AVX2-NEXT:    vpand %ymm0, %ymm5, %ymm5
+; AVX2-NEXT:    vpshufb %ymm5, %ymm2, %ymm5
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm7
+; AVX2-NEXT:    vpmullw %ymm7, %ymm4, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm8
+; AVX2-NEXT:    vpxor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm8
+; AVX2-NEXT:    vpxor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm8
+; AVX2-NEXT:    vpxor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpxor %ymm7, %ymm6, %ymm6
+; AVX2-NEXT:    vpsllw $8, %ymm6, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm8
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX2-NEXT:    vpmullw %ymm4, %ymm9, %ymm9
+; AVX2-NEXT:    vpxor %ymm9, %ymm8, %ymm8
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX2-NEXT:    vpmullw %ymm4, %ymm9, %ymm9
+; AVX2-NEXT:    vpxor %ymm9, %ymm8, %ymm8
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpmullw %ymm1, %ymm4, %ymm1
+; AVX2-NEXT:    vpxor %ymm1, %ymm8, %ymm1
+; AVX2-NEXT:    vpxor %ymm1, %ymm6, %ymm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm6
+; AVX2-NEXT:    vpmullw %ymm6, %ymm4, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm8
+; AVX2-NEXT:    vpxor %ymm6, %ymm8, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm8
+; AVX2-NEXT:    vpxor %ymm6, %ymm8, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5
+; AVX2-NEXT:    vpmullw %ymm5, %ymm4, %ymm4
+; AVX2-NEXT:    vpxor %ymm4, %ymm6, %ymm4
+; AVX2-NEXT:    vpxor %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm7, %ymm1
+; AVX2-NEXT:    vpand %ymm0, %ymm1, %ymm4
+; AVX2-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
+; AVX2-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; AVX2-NEXT:    vpand %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: clmulr_v16i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512-NEXT:    vextracti32x4 $3, %zmm1, %xmm2
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-NEXT:    vextracti32x4 $3, %zmm0, %xmm3
+; AVX512-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm4
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[1,1,1,1]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[1,1,1,1]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm2, %xmm3, %xmm5
+; AVX512-NEXT:    vmovq %xmm5, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm4, %xmm4
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm2
+; AVX512-NEXT:    vmovq %xmm2, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm4, %xmm2
+; AVX512-NEXT:    vextracti32x4 $2, %zmm1, %xmm3
+; AVX512-NEXT:    vextracti32x4 $2, %zmm0, %xmm4
+; AVX512-NEXT:    vpclmulqdq $0, %xmm3, %xmm4, %xmm5
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[1,1,1,1]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[1,1,1,1]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm3, %xmm4, %xmm6
+; AVX512-NEXT:    vmovq %xmm6, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm5, %xmm5
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[3,3,3,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm3, %xmm4, %xmm3
+; AVX512-NEXT:    vmovq %xmm3, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm5, %xmm3
+; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512-NEXT:    vpclmulqdq $0, %xmm3, %xmm4, %xmm5
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[1,1,1,1]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[1,1,1,1]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm3, %xmm4, %xmm6
+; AVX512-NEXT:    vmovq %xmm6, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm5, %xmm5
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[3,3,3,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm3, %xmm4, %xmm3
+; AVX512-NEXT:    vmovq %xmm3, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm5, %xmm3
+; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm4
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[1,1,1,1]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm5
+; AVX512-NEXT:    vmovq %xmm5, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm4, %xmm4
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm4, %xmm0
+; AVX512-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm0
+; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT:    vpsrld $15, %zmm0, %zmm0
+; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512-NEXT:    retq
+  %a.ext = zext <16 x i16> %a to <16 x i32>
+  %b.ext = zext <16 x i16> %b to <16 x i32>
+  %clmul = call <16 x i32> @llvm.clmul.v16i32(<16 x i32> %a.ext, <16 x i32> %b.ext)
+  %res.ext = lshr <16 x i32> %clmul, splat (i32 15)
+  %res = trunc <16 x i32> %res.ext to <16 x i16>
+  ret <16 x i16> %res
+}
+
+define <8 x i32> @clmulr_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
+; AVX1-LABEL: clmulr_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0,1],xmm4[2,3,4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm3 = xmm0[0,1],xmm4[2,3,4,5,6,7]
+; AVX1-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vmovq %xmm2, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vmovd %eax, %xmm2
+; AVX1-NEXT:    vpsrlq $32, %xmm1, %xmm3
+; AVX1-NEXT:    vpsrlq $32, %xmm0, %xmm5
+; AVX1-NEXT:    vpclmulqdq $0, %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vmovq %xmm3, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; AVX1-NEXT:    vpclmulqdq $0, %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vmovq %xmm3, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpclmulqdq $0, %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vmovq %xmm3, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm2
+; AVX1-NEXT:    vpaddd %xmm2, %xmm2, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm2[0,1],xmm4[2,3,4,5,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm7 = xmm3[0,1],xmm4[2,3,4,5,6,7]
+; AVX1-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vmovq %xmm6, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vmovd %eax, %xmm6
+; AVX1-NEXT:    vpsrlq $32, %xmm2, %xmm7
+; AVX1-NEXT:    vpsrlq $32, %xmm3, %xmm8
+; AVX1-NEXT:    vpclmulqdq $0, %xmm7, %xmm8, %xmm7
+; AVX1-NEXT:    vmovq %xmm7, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vpinsrd $1, %eax, %xmm6, %xmm6
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm4 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX1-NEXT:    vpclmulqdq $0, %xmm7, %xmm4, %xmm4
+; AVX1-NEXT:    vmovq %xmm4, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vpinsrd $2, %eax, %xmm6, %xmm4
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vmovq %xmm6, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vpinsrd $3, %eax, %xmm4, %xmm4
+; AVX1-NEXT:    vpaddd %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
+; AVX1-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm1[1,1,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm7 = xmm0[1,1,1,1]
+; AVX1-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; AVX1-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm6
+; AVX1-NEXT:    vmovq %xmm6, %rax
+; AVX1-NEXT:    vpinsrd $2, %eax, %xmm5, %xmm5
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX1-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovq %xmm0, %rax
+; AVX1-NEXT:    vpinsrd $3, %eax, %xmm5, %xmm0
+; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm0
+; AVX1-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[1,1,1,1]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[1,1,1,1]
+; AVX1-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
+; AVX1-NEXT:    vpclmulqdq $17, %xmm2, %xmm3, %xmm5
+; AVX1-NEXT:    vmovq %xmm5, %rax
+; AVX1-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
+; AVX1-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vmovq %xmm2, %rax
+; AVX1-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrld $31, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: clmulr_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm5 = xmm2[0],xmm4[1,2,3]
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm6 = xmm3[0],xmm4[1,2,3]
+; AVX2-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX2-NEXT:    vmovq %xmm5, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vmovd %eax, %xmm5
+; AVX2-NEXT:    vpsrlq $32, %xmm2, %xmm6
+; AVX2-NEXT:    vpsrlq $32, %xmm3, %xmm7
+; AVX2-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX2-NEXT:    vmovq %xmm6, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vpinsrd $1, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} xmm6 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
+; AVX2-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX2-NEXT:    vmovq %xmm6, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vpinsrd $2, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX2-NEXT:    vmovq %xmm6, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vpinsrd $3, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm6 = xmm1[0],xmm4[1,2,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm7 = xmm0[0],xmm4[1,2,3]
+; AVX2-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX2-NEXT:    vmovq %xmm6, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vmovd %eax, %xmm6
+; AVX2-NEXT:    vpsrlq $32, %xmm1, %xmm7
+; AVX2-NEXT:    vpsrlq $32, %xmm0, %xmm8
+; AVX2-NEXT:    vpclmulqdq $0, %xmm7, %xmm8, %xmm7
+; AVX2-NEXT:    vmovq %xmm7, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vpinsrd $1, %eax, %xmm6, %xmm6
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} xmm4 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; AVX2-NEXT:    vpclmulqdq $0, %xmm7, %xmm4, %xmm4
+; AVX2-NEXT:    vmovq %xmm4, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vpinsrd $2, %eax, %xmm6, %xmm4
+; AVX2-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX2-NEXT:    vmovq %xmm6, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vpinsrd $3, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    vinserti128 $1, %xmm5, %ymm4, %ymm4
+; AVX2-NEXT:    vpaddd %ymm4, %ymm4, %ymm4
+; AVX2-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm5
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[1,1,1,1]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm7 = xmm3[1,1,1,1]
+; AVX2-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; AVX2-NEXT:    vpclmulqdq $17, %xmm2, %xmm3, %xmm6
+; AVX2-NEXT:    vmovq %xmm6, %rax
+; AVX2-NEXT:    vpinsrd $2, %eax, %xmm5, %xmm5
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
+; AVX2-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm2
+; AVX2-NEXT:    vmovq %xmm2, %rax
+; AVX2-NEXT:    vpinsrd $3, %eax, %xmm5, %xmm2
+; AVX2-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm3
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[1,1,1,1]
+; AVX2-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; AVX2-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm5
+; AVX2-NEXT:    vmovq %xmm5, %rax
+; AVX2-NEXT:    vpinsrd $2, %eax, %xmm3, %xmm3
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX2-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, %rax
+; AVX2-NEXT:    vpinsrd $3, %eax, %xmm3, %xmm0
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpsrld $31, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm4, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: clmulr_v8i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX512-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm5 = xmm2[0],xmm4[1,2,3]
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm6 = xmm3[0],xmm4[1,2,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX512-NEXT:    vmovq %xmm5, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vmovd %eax, %xmm5
+; AVX512-NEXT:    vpsrlq $32, %xmm2, %xmm6
+; AVX512-NEXT:    vpsrlq $32, %xmm3, %xmm7
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX512-NEXT:    vmovq %xmm6, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm5, %xmm5
+; AVX512-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm8 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm7, %xmm8, %xmm7
+; AVX512-NEXT:    vmovq %xmm7, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm5, %xmm5
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm8 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpclmulqdq $0, %xmm7, %xmm8, %xmm7
+; AVX512-NEXT:    vmovq %xmm7, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm5, %xmm5
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm7 = xmm1[0],xmm4[1,2,3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm4 = xmm0[0],xmm4[1,2,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm7, %xmm4, %xmm4
+; AVX512-NEXT:    vmovq %xmm4, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vmovd %eax, %xmm4
+; AVX512-NEXT:    vpsrlq $32, %xmm1, %xmm7
+; AVX512-NEXT:    vpsrlq $32, %xmm0, %xmm8
+; AVX512-NEXT:    vpclmulqdq $0, %xmm7, %xmm8, %xmm7
+; AVX512-NEXT:    vmovq %xmm7, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm4, %xmm4
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm6 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm7, %xmm6, %xmm6
+; AVX512-NEXT:    vmovq %xmm6, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm4, %xmm4
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX512-NEXT:    vmovq %xmm6, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm4, %xmm4
+; AVX512-NEXT:    vinserti128 $1, %xmm5, %ymm4, %ymm4
+; AVX512-NEXT:    vpaddd %ymm4, %ymm4, %ymm4
+; AVX512-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm5
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm6 = xmm2[1,1,1,1]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm7 = xmm3[1,1,1,1]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm2, %xmm3, %xmm6
+; AVX512-NEXT:    vmovq %xmm6, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm5, %xmm5
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm2
+; AVX512-NEXT:    vmovq %xmm2, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm5, %xmm2
+; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm3
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[1,1,1,1]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm5
+; AVX512-NEXT:    vmovq %xmm5, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm3, %xmm3
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm3, %xmm0
+; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpsrld $31, %ymm0, %ymm0
+; AVX512-NEXT:    vpor %ymm4, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %a.ext = zext <8 x i32> %a to <8 x i64>
+  %b.ext = zext <8 x i32> %b to <8 x i64>
+  %clmul = call <8 x i64> @llvm.clmul.v8i64(<8 x i64> %a.ext, <8 x i64> %b.ext)
+  %res.ext = lshr <8 x i64> %clmul, splat (i64 31)
+  %res = trunc <8 x i64> %res.ext to <8 x i32>
+  ret <8 x i32> %res
+}
+
+define <4 x i64> @clmulr_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
+; AVX1-LABEL: clmulr_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm2
+; AVX1-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm4 = xmm3[1],xmm2[1]
+; AVX1-NEXT:    vpaddq %xmm4, %xmm4, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm5
+; AVX1-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm1 = xmm0[1],xmm5[1]
+; AVX1-NEXT:    vpaddq %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm4, %ymm1
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
+; AVX1-NEXT:    vpsrlq $63, %xmm2, %xmm2
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
+; AVX1-NEXT:    vpsrlq $63, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-PCLMUL-LABEL: clmulr_v4i64:
+; AVX2-PCLMUL:       # %bb.0:
+; AVX2-PCLMUL-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-PCLMUL-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX2-PCLMUL-NEXT:    vpclmulqdq $17, %xmm2, %xmm3, %xmm4
+; AVX2-PCLMUL-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm5
+; AVX2-PCLMUL-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX2-PCLMUL-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm2
+; AVX2-PCLMUL-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX2-PCLMUL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-PCLMUL-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm4[1],ymm0[3],ymm4[3]
+; AVX2-PCLMUL-NEXT:    vpaddq %ymm1, %ymm1, %ymm1
+; AVX2-PCLMUL-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm4[0],ymm0[2],ymm4[2]
+; AVX2-PCLMUL-NEXT:    vpsrlq $63, %ymm0, %ymm0
+; AVX2-PCLMUL-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-PCLMUL-NEXT:    retq
+;
+; AVX2-VPCLMULQDQ-LABEL: clmulr_v4i64:
+; AVX2-VPCLMULQDQ:       # %bb.0:
+; AVX2-VPCLMULQDQ-NEXT:    vpclmulqdq $17, %ymm1, %ymm0, %ymm2
+; AVX2-VPCLMULQDQ-NEXT:    vpclmulqdq $0, %ymm1, %ymm0, %ymm0
+; AVX2-VPCLMULQDQ-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
+; AVX2-VPCLMULQDQ-NEXT:    vpaddq %ymm1, %ymm1, %ymm1
+; AVX2-VPCLMULQDQ-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
+; AVX2-VPCLMULQDQ-NEXT:    vpsrlq $63, %ymm0, %ymm0
+; AVX2-VPCLMULQDQ-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-VPCLMULQDQ-NEXT:    retq
+;
+; AVX512-LABEL: clmulr_v4i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpclmulqdq $17, %ymm1, %ymm0, %ymm2
+; AVX512-NEXT:    vpclmulqdq $0, %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpunpckhqdq {{.*#+}} ymm1 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
+; AVX512-NEXT:    vpaddq %ymm1, %ymm1, %ymm1
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2]
+; AVX512-NEXT:    vpsrlq $63, %ymm0, %ymm0
+; AVX512-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %a.ext = zext <4 x i64> %a to <4 x i128>
+  %b.ext = zext <4 x i64> %b to <4 x i128>
+  %clmul = call <4 x i128> @llvm.clmul.v4i128(<4 x i128> %a.ext, <4 x i128> %b.ext)
+  %res.ext = lshr <4 x i128> %clmul, splat (i128 63)
+  %res = trunc <4 x i128> %res.ext to <4 x i64>
+  ret <4 x i64> %res
+}
+
+define <32 x i8> @clmulh_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
+; AVX1-LABEL: clmulh_v32i8:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm5
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX1-NEXT:    vpshufb %xmm5, %xmm3, %xmm5
+; AVX1-NEXT:    vpsrlw $4, %xmm4, %xmm4
+; AVX1-NEXT:    vpand %xmm2, %xmm4, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX1-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vpor %xmm6, %xmm5, %xmm5
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm6
+; AVX1-NEXT:    vpshufb %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm1, %ymm5
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm9
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm6
+; AVX1-NEXT:    vpshufb %xmm6, %xmm3, %xmm6
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm4, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vpmullw %xmm1, %xmm9, %xmm6
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm7
+; AVX1-NEXT:    vpshufb %xmm7, %xmm3, %xmm7
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm7, %xmm0
+; AVX1-NEXT:    vpmullw %xmm0, %xmm8, %xmm7
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
+; AVX1-NEXT:    vbroadcastf128 {{.*#+}} ymm7 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX1-NEXT:    # ymm7 = mem[0,1,0,1]
+; AVX1-NEXT:    vandps %ymm7, %ymm6, %ymm10
+; AVX1-NEXT:    vbroadcastss {{.*#+}} ymm6 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX1-NEXT:    vpandn %xmm8, %xmm6, %xmm8
+; AVX1-NEXT:    vpmaddubsw %xmm8, %xmm0, %xmm8
+; AVX1-NEXT:    vpsllw $8, %xmm8, %xmm8
+; AVX1-NEXT:    vpandn %xmm9, %xmm6, %xmm9
+; AVX1-NEXT:    vpmaddubsw %xmm9, %xmm1, %xmm9
+; AVX1-NEXT:    vpsllw $8, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm8
+; AVX1-NEXT:    vorps %ymm8, %ymm10, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm1, %xmm10, %xmm11
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm12
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm12, %ymm11
+; AVX1-NEXT:    vandps %ymm7, %ymm11, %ymm11
+; AVX1-NEXT:    vpandn %xmm9, %xmm6, %xmm9
+; AVX1-NEXT:    vpmaddubsw %xmm9, %xmm0, %xmm9
+; AVX1-NEXT:    vpsllw $8, %xmm9, %xmm9
+; AVX1-NEXT:    vpandn %xmm10, %xmm6, %xmm10
+; AVX1-NEXT:    vpmaddubsw %xmm10, %xmm1, %xmm10
+; AVX1-NEXT:    vpsllw $8, %xmm10, %xmm10
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vorps %ymm9, %ymm11, %ymm9
+; AVX1-NEXT:    vxorps %ymm8, %ymm9, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm1, %xmm10, %xmm11
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm12
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm12, %ymm11
+; AVX1-NEXT:    vandps %ymm7, %ymm11, %ymm11
+; AVX1-NEXT:    vpandn %xmm9, %xmm6, %xmm9
+; AVX1-NEXT:    vpmaddubsw %xmm9, %xmm0, %xmm9
+; AVX1-NEXT:    vpsllw $8, %xmm9, %xmm9
+; AVX1-NEXT:    vpandn %xmm10, %xmm6, %xmm10
+; AVX1-NEXT:    vpmaddubsw %xmm10, %xmm1, %xmm10
+; AVX1-NEXT:    vpsllw $8, %xmm10, %xmm10
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vorps %ymm9, %ymm11, %ymm9
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm10
+; AVX1-NEXT:    vextractf128 $1, %ymm10, %xmm11
+; AVX1-NEXT:    vpmullw %xmm1, %xmm11, %xmm12
+; AVX1-NEXT:    vpmullw %xmm0, %xmm10, %xmm13
+; AVX1-NEXT:    vinsertf128 $1, %xmm12, %ymm13, %ymm12
+; AVX1-NEXT:    vandps %ymm7, %ymm12, %ymm12
+; AVX1-NEXT:    vpandn %xmm10, %xmm6, %xmm10
+; AVX1-NEXT:    vpmaddubsw %xmm10, %xmm0, %xmm10
+; AVX1-NEXT:    vpsllw $8, %xmm10, %xmm10
+; AVX1-NEXT:    vpandn %xmm11, %xmm6, %xmm11
+; AVX1-NEXT:    vpmaddubsw %xmm11, %xmm1, %xmm11
+; AVX1-NEXT:    vpsllw $8, %xmm11, %xmm11
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm10, %ymm10
+; AVX1-NEXT:    vorps %ymm10, %ymm12, %ymm10
+; AVX1-NEXT:    vxorps %ymm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm1, %xmm10, %xmm11
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm12
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm12, %ymm11
+; AVX1-NEXT:    vandps %ymm7, %ymm11, %ymm11
+; AVX1-NEXT:    vpandn %xmm9, %xmm6, %xmm9
+; AVX1-NEXT:    vpmaddubsw %xmm9, %xmm0, %xmm9
+; AVX1-NEXT:    vpsllw $8, %xmm9, %xmm9
+; AVX1-NEXT:    vpandn %xmm10, %xmm6, %xmm10
+; AVX1-NEXT:    vpmaddubsw %xmm10, %xmm1, %xmm10
+; AVX1-NEXT:    vpsllw $8, %xmm10, %xmm10
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vorps %ymm9, %ymm11, %ymm9
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm10
+; AVX1-NEXT:    vextractf128 $1, %ymm10, %xmm11
+; AVX1-NEXT:    vpmullw %xmm1, %xmm11, %xmm12
+; AVX1-NEXT:    vpmullw %xmm0, %xmm10, %xmm13
+; AVX1-NEXT:    vinsertf128 $1, %xmm12, %ymm13, %ymm12
+; AVX1-NEXT:    vandps %ymm7, %ymm12, %ymm12
+; AVX1-NEXT:    vpandn %xmm10, %xmm6, %xmm10
+; AVX1-NEXT:    vpmaddubsw %xmm10, %xmm0, %xmm10
+; AVX1-NEXT:    vpsllw $8, %xmm10, %xmm10
+; AVX1-NEXT:    vpandn %xmm11, %xmm6, %xmm11
+; AVX1-NEXT:    vpmaddubsw %xmm11, %xmm1, %xmm11
+; AVX1-NEXT:    vpsllw $8, %xmm11, %xmm11
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm10, %ymm10
+; AVX1-NEXT:    vorps %ymm10, %ymm12, %ymm10
+; AVX1-NEXT:    vxorps %ymm10, %ymm9, %ymm9
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm10
+; AVX1-NEXT:    vextractf128 $1, %ymm10, %xmm11
+; AVX1-NEXT:    vpmullw %xmm1, %xmm11, %xmm12
+; AVX1-NEXT:    vpmullw %xmm0, %xmm10, %xmm13
+; AVX1-NEXT:    vinsertf128 $1, %xmm12, %ymm13, %ymm12
+; AVX1-NEXT:    vandps %ymm7, %ymm12, %ymm7
+; AVX1-NEXT:    vpandn %xmm10, %xmm6, %xmm10
+; AVX1-NEXT:    vpmaddubsw %xmm10, %xmm0, %xmm10
+; AVX1-NEXT:    vpsllw $8, %xmm10, %xmm10
+; AVX1-NEXT:    vpandn %xmm11, %xmm6, %xmm11
+; AVX1-NEXT:    vpmaddubsw %xmm11, %xmm1, %xmm11
+; AVX1-NEXT:    vpsllw $8, %xmm11, %xmm11
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm10, %ymm10
+; AVX1-NEXT:    vorps %ymm7, %ymm10, %ymm7
+; AVX1-NEXT:    vxorps %ymm7, %ymm9, %ymm7
+; AVX1-NEXT:    vxorps %ymm7, %ymm8, %ymm7
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5
+; AVX1-NEXT:    vextractf128 $1, %ymm5, %xmm8
+; AVX1-NEXT:    vpmullw %xmm1, %xmm8, %xmm9
+; AVX1-NEXT:    vpmullw %xmm5, %xmm0, %xmm10
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm10, %ymm9
+; AVX1-NEXT:    vandps %ymm6, %ymm9, %ymm9
+; AVX1-NEXT:    vpandn %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vpmaddubsw %xmm5, %xmm0, %xmm0
+; AVX1-NEXT:    vpsllw $8, %xmm0, %xmm0
+; AVX1-NEXT:    vpandn %xmm8, %xmm6, %xmm5
+; AVX1-NEXT:    vpmaddubsw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsllw $8, %xmm1, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vorps %ymm0, %ymm9, %ymm0
+; AVX1-NEXT:    vxorps %ymm0, %ymm7, %ymm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm5
+; AVX1-NEXT:    vpand %xmm2, %xmm5, %xmm5
+; AVX1-NEXT:    vpshufb %xmm5, %xmm4, %xmm5
+; AVX1-NEXT:    vpor %xmm5, %xmm1, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm5
+; AVX1-NEXT:    vpshufb %xmm5, %xmm3, %xmm3
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: clmulh_v32i8:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpbroadcastb {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm4
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX2-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX2-NEXT:    vpshufb %ymm4, %ymm3, %ymm4
+; AVX2-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm5
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX2-NEXT:    # ymm0 = mem[0,1,0,1]
+; AVX2-NEXT:    vpshufb %ymm5, %ymm0, %ymm5
+; AVX2-NEXT:    vpor %ymm5, %ymm4, %ymm4
+; AVX2-NEXT:    vpsrlw $4, %ymm1, %ymm5
+; AVX2-NEXT:    vpand %ymm2, %ymm5, %ymm5
+; AVX2-NEXT:    vpshufb %ymm5, %ymm0, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm7
+; AVX2-NEXT:    vpmullw %ymm7, %ymm4, %ymm8
+; AVX2-NEXT:    vpbroadcastw {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpand %ymm5, %ymm8, %ymm8
+; AVX2-NEXT:    vpandn %ymm7, %ymm5, %ymm7
+; AVX2-NEXT:    vpmaddubsw %ymm7, %ymm4, %ymm7
+; AVX2-NEXT:    vpsllw $8, %ymm7, %ymm7
+; AVX2-NEXT:    vpor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm9
+; AVX2-NEXT:    vpand %ymm5, %ymm9, %ymm9
+; AVX2-NEXT:    vpandn %ymm8, %ymm5, %ymm8
+; AVX2-NEXT:    vpmaddubsw %ymm8, %ymm4, %ymm8
+; AVX2-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX2-NEXT:    vpor %ymm8, %ymm9, %ymm8
+; AVX2-NEXT:    vpxor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm9
+; AVX2-NEXT:    vpand %ymm5, %ymm9, %ymm9
+; AVX2-NEXT:    vpandn %ymm8, %ymm5, %ymm8
+; AVX2-NEXT:    vpmaddubsw %ymm8, %ymm4, %ymm8
+; AVX2-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX2-NEXT:    vpor %ymm8, %ymm9, %ymm8
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6
+; AVX2-NEXT:    vpmullw %ymm6, %ymm4, %ymm9
+; AVX2-NEXT:    vpand %ymm5, %ymm9, %ymm9
+; AVX2-NEXT:    vpandn %ymm6, %ymm5, %ymm6
+; AVX2-NEXT:    vpmaddubsw %ymm6, %ymm4, %ymm6
+; AVX2-NEXT:    vpsllw $8, %ymm6, %ymm6
+; AVX2-NEXT:    vpor %ymm6, %ymm9, %ymm6
+; AVX2-NEXT:    vpxor %ymm6, %ymm8, %ymm6
+; AVX2-NEXT:    vpxor %ymm6, %ymm7, %ymm6
+; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm7
+; AVX2-NEXT:    vpmullw %ymm7, %ymm4, %ymm8
+; AVX2-NEXT:    vpand %ymm5, %ymm8, %ymm8
+; AVX2-NEXT:    vpandn %ymm7, %ymm5, %ymm7
+; AVX2-NEXT:    vpmaddubsw %ymm7, %ymm4, %ymm7
+; AVX2-NEXT:    vpsllw $8, %ymm7, %ymm7
+; AVX2-NEXT:    vpor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm9
+; AVX2-NEXT:    vpand %ymm5, %ymm9, %ymm9
+; AVX2-NEXT:    vpandn %ymm8, %ymm5, %ymm8
+; AVX2-NEXT:    vpmaddubsw %ymm8, %ymm4, %ymm8
+; AVX2-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX2-NEXT:    vpor %ymm8, %ymm9, %ymm8
+; AVX2-NEXT:    vpxor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm9
+; AVX2-NEXT:    vpand %ymm5, %ymm9, %ymm9
+; AVX2-NEXT:    vpandn %ymm8, %ymm5, %ymm8
+; AVX2-NEXT:    vpmaddubsw %ymm8, %ymm4, %ymm8
+; AVX2-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX2-NEXT:    vpor %ymm8, %ymm9, %ymm8
+; AVX2-NEXT:    vpxor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpxor %ymm7, %ymm6, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpmullw %ymm1, %ymm4, %ymm7
+; AVX2-NEXT:    vpand %ymm5, %ymm7, %ymm7
+; AVX2-NEXT:    vpandn %ymm1, %ymm5, %ymm1
+; AVX2-NEXT:    vpmaddubsw %ymm1, %ymm4, %ymm1
+; AVX2-NEXT:    vpsllw $8, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm7, %ymm1
+; AVX2-NEXT:    vpxor %ymm1, %ymm6, %ymm1
+; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm4
+; AVX2-NEXT:    vpshufb %ymm4, %ymm3, %ymm3
+; AVX2-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; AVX2-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX2-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: clmulh_v32i8:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm1
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm2
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm3
+; AVX512-NEXT:    vextracti64x4 $1, %zmm3, %ymm4
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX512-NEXT:    vpmullw %ymm4, %ymm1, %ymm4
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX512-NEXT:    vpmullw %ymm3, %ymm0, %ymm3
+; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm4
+; AVX512-NEXT:    vextracti64x4 $1, %zmm4, %ymm5
+; AVX512-NEXT:    vpmullw %ymm5, %ymm1, %ymm5
+; AVX512-NEXT:    vpmullw %ymm4, %ymm0, %ymm4
+; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512-NEXT:    vpxorq %zmm3, %zmm4, %zmm3
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm4
+; AVX512-NEXT:    vextracti64x4 $1, %zmm4, %ymm5
+; AVX512-NEXT:    vpmullw %ymm5, %ymm1, %ymm5
+; AVX512-NEXT:    vpmullw %ymm4, %ymm0, %ymm4
+; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm5
+; AVX512-NEXT:    vextracti64x4 $1, %zmm5, %ymm6
+; AVX512-NEXT:    vpmullw %ymm6, %ymm1, %ymm6
+; AVX512-NEXT:    vpmullw %ymm5, %ymm0, %ymm5
+; AVX512-NEXT:    vinserti64x4 $1, %ymm6, %zmm5, %zmm5
+; AVX512-NEXT:    vpternlogq {{.*#+}} zmm5 = zmm5 ^ zmm3 ^ zmm4
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm3
+; AVX512-NEXT:    vextracti64x4 $1, %zmm3, %ymm4
+; AVX512-NEXT:    vpmullw %ymm4, %ymm1, %ymm4
+; AVX512-NEXT:    vpmullw %ymm3, %ymm0, %ymm3
+; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm3, %zmm3
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm4
+; AVX512-NEXT:    vextracti64x4 $1, %zmm4, %ymm6
+; AVX512-NEXT:    vpmullw %ymm6, %ymm1, %ymm6
+; AVX512-NEXT:    vpmullw %ymm4, %ymm0, %ymm4
+; AVX512-NEXT:    vinserti64x4 $1, %ymm6, %zmm4, %zmm4
+; AVX512-NEXT:    vpternlogq {{.*#+}} zmm4 = zmm4 ^ zmm5 ^ zmm3
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm3
+; AVX512-NEXT:    vextracti64x4 $1, %zmm3, %ymm5
+; AVX512-NEXT:    vpmullw %ymm5, %ymm1, %ymm5
+; AVX512-NEXT:    vpmullw %ymm3, %ymm0, %ymm3
+; AVX512-NEXT:    vinserti64x4 $1, %ymm5, %zmm3, %zmm3
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
+; AVX512-NEXT:    vextracti64x4 $1, %zmm2, %ymm5
+; AVX512-NEXT:    vpmullw %ymm5, %ymm1, %ymm1
+; AVX512-NEXT:    vpmullw %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm0 ^ zmm4 ^ zmm3
+; AVX512-NEXT:    vpsrlw $8, %ymm0, %ymm1
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512-NEXT:    vpmovdb %zmm1, %xmm1
+; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; AVX512-NEXT:    vpsrlw $8, %ymm0, %ymm0
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-NEXT:    vpmovdb %zmm0, %xmm0
+; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+  %a.ext = zext <32 x i8> %a to <32 x i16>
+  %b.ext = zext <32 x i8> %b to <32 x i16>
+  %clmul = call <32 x i16> @llvm.clmul.v32i16(<32 x i16> %a.ext, <32 x i16> %b.ext)
+  %res.ext = lshr <32 x i16> %clmul, splat (i16 8)
+  %res = trunc <32 x i16> %res.ext to <32 x i8>
+  ret <32 x i8> %res
+}
+
+define <16 x i16> @clmulh_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
+; AVX1-LABEL: clmulh_v16i16:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; AVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm5
+; AVX1-NEXT:    vbroadcastss {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT:    vpand %xmm2, %xmm5, %xmm6
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX1-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vpsrlw $4, %xmm5, %xmm5
+; AVX1-NEXT:    vpand %xmm2, %xmm5, %xmm7
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm5 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX1-NEXT:    vpshufb %xmm7, %xmm5, %xmm7
+; AVX1-NEXT:    vpor %xmm7, %xmm6, %xmm6
+; AVX1-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm7
+; AVX1-NEXT:    vpshufb %xmm7, %xmm4, %xmm7
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm7, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm1, %ymm1
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm7
+; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm8
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpshufb %xmm3, %xmm6, %xmm6
+; AVX1-NEXT:    vpand %xmm2, %xmm6, %xmm9
+; AVX1-NEXT:    vpshufb %xmm9, %xmm4, %xmm9
+; AVX1-NEXT:    vpsrlw $4, %xmm6, %xmm6
+; AVX1-NEXT:    vpand %xmm2, %xmm6, %xmm6
+; AVX1-NEXT:    vpshufb %xmm6, %xmm5, %xmm6
+; AVX1-NEXT:    vpor %xmm6, %xmm9, %xmm6
+; AVX1-NEXT:    vpmullw %xmm6, %xmm8, %xmm8
+; AVX1-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm9
+; AVX1-NEXT:    vpshufb %xmm9, %xmm4, %xmm9
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm9, %xmm0
+; AVX1-NEXT:    vpmullw %xmm7, %xmm0, %xmm7
+; AVX1-NEXT:    vinsertf128 $1, %xmm8, %ymm7, %ymm7
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm9
+; AVX1-NEXT:    vpmullw %xmm6, %xmm9, %xmm9
+; AVX1-NEXT:    vpmullw %xmm0, %xmm8, %xmm8
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm8
+; AVX1-NEXT:    vxorps %ymm7, %ymm8, %ymm7
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm9
+; AVX1-NEXT:    vpmullw %xmm6, %xmm9, %xmm9
+; AVX1-NEXT:    vpmullw %xmm0, %xmm8, %xmm8
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vxorps %ymm7, %ymm8, %ymm7
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm9
+; AVX1-NEXT:    vpmullw %xmm6, %xmm9, %xmm9
+; AVX1-NEXT:    vpmullw %xmm0, %xmm8, %xmm8
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vxorps %ymm7, %ymm8, %ymm7
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm9
+; AVX1-NEXT:    vpmullw %xmm6, %xmm9, %xmm9
+; AVX1-NEXT:    vpmullw %xmm0, %xmm8, %xmm8
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vxorps %ymm7, %ymm8, %ymm7
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX1-NEXT:    vextractf128 $1, %ymm8, %xmm9
+; AVX1-NEXT:    vpmullw %xmm6, %xmm9, %xmm9
+; AVX1-NEXT:    vpmullw %xmm0, %xmm8, %xmm8
+; AVX1-NEXT:    vinsertf128 $1, %xmm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX1-NEXT:    vextractf128 $1, %ymm9, %xmm10
+; AVX1-NEXT:    vpmullw %xmm6, %xmm10, %xmm10
+; AVX1-NEXT:    vpmullw %xmm0, %xmm9, %xmm9
+; AVX1-NEXT:    vinsertf128 $1, %xmm10, %ymm9, %ymm9
+; AVX1-NEXT:    vxorps %ymm9, %ymm8, %ymm8
+; AVX1-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm9
+; AVX1-NEXT:    vpmullw %xmm6, %xmm9, %xmm6
+; AVX1-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm0, %ymm0
+; AVX1-NEXT:    vxorps %ymm0, %ymm8, %ymm0
+; AVX1-NEXT:    vxorps %ymm0, %ymm7, %ymm0
+; AVX1-NEXT:    vpshufb %xmm3, %xmm0, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm6
+; AVX1-NEXT:    vpshufb %xmm6, %xmm4, %xmm6
+; AVX1-NEXT:    vpsrlw $4, %xmm1, %xmm1
+; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT:    vpshufb %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpor %xmm1, %xmm6, %xmm1
+; AVX1-NEXT:    vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm3
+; AVX1-NEXT:    vpshufb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT:    vpsrlw $4, %xmm0, %xmm0
+; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb %xmm0, %xmm5, %xmm0
+; AVX1-NEXT:    vpor %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: clmulh_v16i16:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm5 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
+; AVX2-NEXT:    vpshufb %ymm5, %ymm0, %ymm3
+; AVX2-NEXT:    vpbroadcastb {{.*#+}} ymm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX2-NEXT:    vpand %ymm0, %ymm3, %ymm4
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm2 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX2-NEXT:    # ymm2 = mem[0,1,0,1]
+; AVX2-NEXT:    vpshufb %ymm4, %ymm2, %ymm4
+; AVX2-NEXT:    vpsrlw $4, %ymm3, %ymm3
+; AVX2-NEXT:    vpand %ymm0, %ymm3, %ymm6
+; AVX2-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX2-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX2-NEXT:    vpshufb %ymm6, %ymm3, %ymm6
+; AVX2-NEXT:    vpor %ymm6, %ymm4, %ymm4
+; AVX2-NEXT:    vpshufb %ymm5, %ymm1, %ymm5
+; AVX2-NEXT:    vpsrlw $4, %ymm5, %ymm1
+; AVX2-NEXT:    vpand %ymm0, %ymm1, %ymm1
+; AVX2-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm6
+; AVX2-NEXT:    vpmullw %ymm6, %ymm4, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm7
+; AVX2-NEXT:    vpmullw %ymm7, %ymm4, %ymm7
+; AVX2-NEXT:    vpxor %ymm6, %ymm7, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm7
+; AVX2-NEXT:    vpmullw %ymm7, %ymm4, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm8
+; AVX2-NEXT:    vpxor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpxor %ymm7, %ymm6, %ymm6
+; AVX2-NEXT:    vpand %ymm0, %ymm5, %ymm5
+; AVX2-NEXT:    vpshufb %ymm5, %ymm2, %ymm5
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm7
+; AVX2-NEXT:    vpmullw %ymm7, %ymm4, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm8
+; AVX2-NEXT:    vpxor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm8
+; AVX2-NEXT:    vpxor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm8
+; AVX2-NEXT:    vpxor %ymm7, %ymm8, %ymm7
+; AVX2-NEXT:    vpxor %ymm7, %ymm6, %ymm6
+; AVX2-NEXT:    vpsllw $8, %ymm6, %ymm7
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm8
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX2-NEXT:    vpmullw %ymm4, %ymm9, %ymm9
+; AVX2-NEXT:    vpxor %ymm9, %ymm8, %ymm8
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm9
+; AVX2-NEXT:    vpmullw %ymm4, %ymm9, %ymm9
+; AVX2-NEXT:    vpxor %ymm9, %ymm8, %ymm8
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT:    vpmullw %ymm1, %ymm4, %ymm1
+; AVX2-NEXT:    vpxor %ymm1, %ymm8, %ymm1
+; AVX2-NEXT:    vpxor %ymm1, %ymm6, %ymm1
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm6
+; AVX2-NEXT:    vpmullw %ymm6, %ymm4, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm8
+; AVX2-NEXT:    vpxor %ymm6, %ymm8, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm8
+; AVX2-NEXT:    vpmullw %ymm4, %ymm8, %ymm8
+; AVX2-NEXT:    vpxor %ymm6, %ymm8, %ymm6
+; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5
+; AVX2-NEXT:    vpmullw %ymm5, %ymm4, %ymm4
+; AVX2-NEXT:    vpxor %ymm4, %ymm6, %ymm4
+; AVX2-NEXT:    vpxor %ymm4, %ymm1, %ymm1
+; AVX2-NEXT:    vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT:    vpor %ymm1, %ymm7, %ymm1
+; AVX2-NEXT:    vpand %ymm0, %ymm1, %ymm4
+; AVX2-NEXT:    vpshufb %ymm4, %ymm2, %ymm2
+; AVX2-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; AVX2-NEXT:    vpand %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX2-NEXT:    vpor %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: clmulh_v16i16:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512-NEXT:    vextracti32x4 $3, %zmm1, %xmm2
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVX512-NEXT:    vextracti32x4 $3, %zmm0, %xmm3
+; AVX512-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm4
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[1,1,1,1]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[1,1,1,1]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm2, %xmm3, %xmm5
+; AVX512-NEXT:    vmovq %xmm5, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm4, %xmm4
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm2
+; AVX512-NEXT:    vmovq %xmm2, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm4, %xmm2
+; AVX512-NEXT:    vextracti32x4 $2, %zmm1, %xmm3
+; AVX512-NEXT:    vextracti32x4 $2, %zmm0, %xmm4
+; AVX512-NEXT:    vpclmulqdq $0, %xmm3, %xmm4, %xmm5
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[1,1,1,1]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[1,1,1,1]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm3, %xmm4, %xmm6
+; AVX512-NEXT:    vmovq %xmm6, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm5, %xmm5
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[3,3,3,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm3, %xmm4, %xmm3
+; AVX512-NEXT:    vmovq %xmm3, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm5, %xmm3
+; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm4
+; AVX512-NEXT:    vpclmulqdq $0, %xmm3, %xmm4, %xmm5
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm6 = xmm3[1,1,1,1]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm7 = xmm4[1,1,1,1]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm3, %xmm4, %xmm6
+; AVX512-NEXT:    vmovq %xmm6, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm5, %xmm5
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[3,3,3,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm3, %xmm4, %xmm3
+; AVX512-NEXT:    vmovq %xmm3, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm5, %xmm3
+; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm4
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[1,1,1,1]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm5
+; AVX512-NEXT:    vmovq %xmm5, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm4, %xmm4
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm4, %xmm0
+; AVX512-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm0
+; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT:    vpsrld $16, %zmm0, %zmm0
+; AVX512-NEXT:    vpmovdw %zmm0, %ymm0
+; AVX512-NEXT:    retq
+  %a.ext = zext <16 x i16> %a to <16 x i32>
+  %b.ext = zext <16 x i16> %b to <16 x i32>
+  %clmul = call <16 x i32> @llvm.clmul.v16i32(<16 x i32> %a.ext, <16 x i32> %b.ext)
+  %res.ext = lshr <16 x i32> %clmul, splat (i32 16)
+  %res = trunc <16 x i32> %res.ext to <16 x i16>
+  ret <16 x i16> %res
+}
+
+define <8 x i32> @clmulh_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
+; AVX1-LABEL: clmulh_v8i32:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm3[0,1],xmm2[2,3,4,5,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm6 = xmm5[0,1],xmm2[2,3,4,5,6,7]
+; AVX1-NEXT:    vpclmulqdq $0, %xmm4, %xmm6, %xmm4
+; AVX1-NEXT:    vmovq %xmm4, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vmovd %eax, %xmm4
+; AVX1-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; AVX1-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; AVX1-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vmovq %xmm6, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vpinsrd $1, %eax, %xmm4, %xmm4
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm6 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; AVX1-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX1-NEXT:    vmovq %xmm6, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vpinsrd $2, %eax, %xmm4, %xmm4
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpclmulqdq $0, %xmm3, %xmm5, %xmm3
+; AVX1-NEXT:    vmovq %xmm3, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vpinsrd $3, %eax, %xmm4, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm4 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm5 = xmm0[0,1],xmm2[2,3,4,5,6,7]
+; AVX1-NEXT:    vpclmulqdq $0, %xmm4, %xmm5, %xmm4
+; AVX1-NEXT:    vmovq %xmm4, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vmovd %eax, %xmm4
+; AVX1-NEXT:    vpsrlq $32, %xmm1, %xmm5
+; AVX1-NEXT:    vpsrlq $32, %xmm0, %xmm6
+; AVX1-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX1-NEXT:    vmovq %xmm5, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vpinsrd $1, %eax, %xmm4, %xmm4
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX1-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX1-NEXT:    vpclmulqdq $0, %xmm5, %xmm2, %xmm2
+; AVX1-NEXT:    vmovq %xmm2, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vpinsrd $2, %eax, %xmm4, %xmm2
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovq %xmm0, %rax
+; AVX1-NEXT:    shrq $32, %rax
+; AVX1-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: clmulh_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm4 = xmm3[0],xmm2[1,2,3]
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm6 = xmm5[0],xmm2[1,2,3]
+; AVX2-NEXT:    vpclmulqdq $0, %xmm4, %xmm6, %xmm4
+; AVX2-NEXT:    vmovq %xmm4, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vmovd %eax, %xmm4
+; AVX2-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; AVX2-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; AVX2-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX2-NEXT:    vmovq %xmm6, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vpinsrd $1, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} xmm6 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm5[2],xmm2[2],xmm5[3],xmm2[3]
+; AVX2-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX2-NEXT:    vmovq %xmm6, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vpinsrd $2, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpclmulqdq $0, %xmm3, %xmm5, %xmm3
+; AVX2-NEXT:    vmovq %xmm3, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vpinsrd $3, %eax, %xmm4, %xmm3
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm4 = xmm1[0],xmm2[1,2,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm5 = xmm0[0],xmm2[1,2,3]
+; AVX2-NEXT:    vpclmulqdq $0, %xmm4, %xmm5, %xmm4
+; AVX2-NEXT:    vmovq %xmm4, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vmovd %eax, %xmm4
+; AVX2-NEXT:    vpsrlq $32, %xmm1, %xmm5
+; AVX2-NEXT:    vpsrlq $32, %xmm0, %xmm6
+; AVX2-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX2-NEXT:    vmovq %xmm5, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vpinsrd $1, %eax, %xmm4, %xmm4
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX2-NEXT:    vpunpckhdq {{.*#+}} xmm2 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; AVX2-NEXT:    vpclmulqdq $0, %xmm5, %xmm2, %xmm2
+; AVX2-NEXT:    vmovq %xmm2, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vpinsrd $2, %eax, %xmm4, %xmm2
+; AVX2-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vmovq %xmm0, %rax
+; AVX2-NEXT:    shrq $32, %rax
+; AVX2-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: clmulh_v8i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm4 = xmm3[0],xmm2[1,2,3]
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm5
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm6 = xmm5[0],xmm2[1,2,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm4, %xmm6, %xmm4
+; AVX512-NEXT:    vmovq %xmm4, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vmovd %eax, %xmm4
+; AVX512-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; AVX512-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX512-NEXT:    vmovq %xmm6, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm4, %xmm4
+; AVX512-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm8 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm7, %xmm8, %xmm7
+; AVX512-NEXT:    vmovq %xmm7, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm4, %xmm4
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpclmulqdq $0, %xmm3, %xmm5, %xmm3
+; AVX512-NEXT:    vmovq %xmm3, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm4, %xmm3
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm4 = xmm1[0],xmm2[1,2,3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm4, %xmm2, %xmm2
+; AVX512-NEXT:    vmovq %xmm2, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vmovd %eax, %xmm2
+; AVX512-NEXT:    vpsrlq $32, %xmm1, %xmm4
+; AVX512-NEXT:    vpsrlq $32, %xmm0, %xmm5
+; AVX512-NEXT:    vpclmulqdq $0, %xmm4, %xmm5, %xmm4
+; AVX512-NEXT:    vmovq %xmm4, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm4 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm4, %xmm5, %xmm4
+; AVX512-NEXT:    vmovq %xmm4, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX512-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm0
+; AVX512-NEXT:    retq
+  %a.ext = zext <8 x i32> %a to <8 x i64>
+  %b.ext = zext <8 x i32> %b to <8 x i64>
+  %clmul = call <8 x i64> @llvm.clmul.v8i64(<8 x i64> %a.ext, <8 x i64> %b.ext)
+  %res.ext = lshr <8 x i64> %clmul, splat (i64 32)
+  %res = trunc <8 x i64> %res.ext to <8 x i32>
+  ret <8 x i32> %res
+}
+
+define <4 x i64> @clmulh_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
+; AVX1-LABEL: clmulh_v4i64:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpclmulqdq $17, %xmm2, %xmm3, %xmm4
+; AVX1-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm4[1]
+; AVX1-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm3
+; AVX1-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm3[1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-PCLMUL-LABEL: clmulh_v4i64:
+; AVX2-PCLMUL:       # %bb.0:
+; AVX2-PCLMUL-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; AVX2-PCLMUL-NEXT:    vextracti128 $1, %ymm0, %xmm3
+; AVX2-PCLMUL-NEXT:    vpclmulqdq $17, %xmm2, %xmm3, %xmm4
+; AVX2-PCLMUL-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm5
+; AVX2-PCLMUL-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX2-PCLMUL-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm2
+; AVX2-PCLMUL-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX2-PCLMUL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-PCLMUL-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm4[1],ymm0[3],ymm4[3]
+; AVX2-PCLMUL-NEXT:    retq
+;
+; AVX2-VPCLMULQDQ-LABEL: clmulh_v4i64:
+; AVX2-VPCLMULQDQ:       # %bb.0:
+; AVX2-VPCLMULQDQ-NEXT:    vpclmulqdq $17, %ymm1, %ymm0, %ymm2
+; AVX2-VPCLMULQDQ-NEXT:    vpclmulqdq $0, %ymm1, %ymm0, %ymm0
+; AVX2-VPCLMULQDQ-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
+; AVX2-VPCLMULQDQ-NEXT:    retq
+;
+; AVX512-LABEL: clmulh_v4i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpclmulqdq $17, %ymm1, %ymm0, %ymm2
+; AVX512-NEXT:    vpclmulqdq $0, %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
+; AVX512-NEXT:    retq
+  %a.ext = zext <4 x i64> %a to <4 x i128>
+  %b.ext = zext <4 x i64> %b to <4 x i128>
+  %clmul = call <4 x i128> @llvm.clmul.v4i128(<4 x i128> %a.ext, <4 x i128> %b.ext)
+  %res.ext = lshr <4 x i128> %clmul, splat (i128 64)
+  %res = trunc <4 x i128> %res.ext to <4 x i64>
+  ret <4 x i64> %res
+}
diff --git a/llvm/test/CodeGen/X86/clmul-vector-512.ll b/llvm/test/CodeGen/X86/clmul-vector-512.ll
index 92b81de640a9c..6a5f1f4b6d660 100644
--- a/llvm/test/CodeGen/X86/clmul-vector-512.ll
+++ b/llvm/test/CodeGen/X86/clmul-vector-512.ll
@@ -300,6 +300,1598 @@ define <8 x i64> @clmul_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
   ret <8 x i64> %res
 }
 
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; AVX512F: {{.*}}
-; AVX512VL: {{.*}}
+define <64 x i8> @clmulr_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
+; AVX512F-LABEL: clmulr_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
+; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm2, %ymm3, %ymm5
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX512F-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpshufb %ymm5, %ymm4, %ymm5
+; AVX512F-NEXT:    vpsrlw $4, %ymm3, %ymm3
+; AVX512F-NEXT:    vpand %ymm2, %ymm3, %ymm6
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX512F-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm3, %ymm6
+; AVX512F-NEXT:    vpor %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm7
+; AVX512F-NEXT:    vpsrlw $4, %ymm7, %ymm6
+; AVX512F-NEXT:    vpand %ymm2, %ymm6, %ymm6
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm3, %ymm6
+; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm8
+; AVX512F-NEXT:    vpand %ymm2, %ymm8, %ymm8
+; AVX512F-NEXT:    vpshufb %ymm8, %ymm3, %ymm8
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm8
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm9
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm10, %ymm6
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm11
+; AVX512F-NEXT:    vpshufb %ymm11, %ymm4, %ymm11
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm11, %ymm0
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm9, %ymm11
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm11, %zmm11
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX512F-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512F-NEXT:    vpmaddubsw %ymm9, %ymm0, %ymm9
+; AVX512F-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512F-NEXT:    vpandn %ymm10, %ymm6, %ymm10
+; AVX512F-NEXT:    vpmaddubsw %ymm10, %ymm5, %ymm10
+; AVX512F-NEXT:    vpsllw $8, %ymm10, %ymm10
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 | (zmm11 & zmm6)
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm10
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm10, %ymm11
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm11, %ymm12
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm10, %ymm13
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512F-NEXT:    vpandq %zmm6, %zmm12, %zmm12
+; AVX512F-NEXT:    vpandn %ymm10, %ymm6, %ymm10
+; AVX512F-NEXT:    vpmaddubsw %ymm10, %ymm0, %ymm10
+; AVX512F-NEXT:    vpsllw $8, %ymm10, %ymm10
+; AVX512F-NEXT:    vpandn %ymm11, %ymm6, %ymm11
+; AVX512F-NEXT:    vpmaddubsw %ymm11, %ymm5, %ymm11
+; AVX512F-NEXT:    vpsllw $8, %ymm11, %ymm11
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm10 = zmm9 ^ (zmm10 | zmm12)
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm9
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm9, %ymm11
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm11, %ymm12
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm9, %ymm13
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512F-NEXT:    vpandq %zmm6, %zmm12, %zmm12
+; AVX512F-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512F-NEXT:    vpmaddubsw %ymm9, %ymm0, %ymm9
+; AVX512F-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512F-NEXT:    vpandn %ymm11, %ymm6, %ymm11
+; AVX512F-NEXT:    vpmaddubsw %ymm11, %ymm5, %ymm11
+; AVX512F-NEXT:    vpsllw $8, %ymm11, %ymm11
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm11, %zmm9, %zmm9
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm10 ^ (zmm9 | zmm12)
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm10, %ymm11
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm8, %ymm12
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm11, %zmm12, %zmm11
+; AVX512F-NEXT:    vpandq %zmm6, %zmm11, %zmm11
+; AVX512F-NEXT:    vpandn %ymm8, %ymm6, %ymm8
+; AVX512F-NEXT:    vpmaddubsw %ymm8, %ymm0, %ymm8
+; AVX512F-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX512F-NEXT:    vpandn %ymm10, %ymm6, %ymm10
+; AVX512F-NEXT:    vpmaddubsw %ymm10, %ymm5, %ymm10
+; AVX512F-NEXT:    vpsllw $8, %ymm10, %ymm10
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm9 ^ (zmm8 | zmm11)
+; AVX512F-NEXT:    vpand %ymm2, %ymm7, %ymm7
+; AVX512F-NEXT:    vpshufb %ymm7, %ymm4, %ymm7
+; AVX512F-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpshufb %ymm1, %ymm4, %ymm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm1
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm9, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm11
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512F-NEXT:    vpandq %zmm6, %zmm10, %zmm10
+; AVX512F-NEXT:    vpandn %ymm7, %ymm6, %ymm7
+; AVX512F-NEXT:    vpmaddubsw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vpsllw $8, %ymm7, %ymm7
+; AVX512F-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512F-NEXT:    vpmaddubsw %ymm9, %ymm5, %ymm9
+; AVX512F-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm7 = zmm8 ^ (zmm7 | zmm10)
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm8, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm9, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm8, %ymm11
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512F-NEXT:    vpandq %zmm6, %zmm10, %zmm10
+; AVX512F-NEXT:    vpandn %ymm8, %ymm6, %ymm8
+; AVX512F-NEXT:    vpmaddubsw %ymm8, %ymm0, %ymm8
+; AVX512F-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX512F-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512F-NEXT:    vpmaddubsw %ymm9, %ymm5, %ymm9
+; AVX512F-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm7 ^ (zmm8 | zmm10)
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm9, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm11
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512F-NEXT:    vpandq %zmm6, %zmm10, %zmm10
+; AVX512F-NEXT:    vpandn %ymm7, %ymm6, %ymm7
+; AVX512F-NEXT:    vpmaddubsw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vpsllw $8, %ymm7, %ymm7
+; AVX512F-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512F-NEXT:    vpmaddubsw %ymm9, %ymm5, %ymm9
+; AVX512F-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm7 = zmm8 ^ (zmm7 | zmm10)
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm8
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm8, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm1, %ymm0, %ymm10
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm10, %zmm9
+; AVX512F-NEXT:    vpandq %zmm6, %zmm9, %zmm9
+; AVX512F-NEXT:    vpandn %ymm1, %ymm6, %ymm1
+; AVX512F-NEXT:    vpmaddubsw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw $8, %ymm0, %ymm0
+; AVX512F-NEXT:    vpandn %ymm8, %ymm6, %ymm1
+; AVX512F-NEXT:    vpmaddubsw %ymm1, %ymm5, %ymm1
+; AVX512F-NEXT:    vpsllw $8, %ymm1, %ymm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm7 ^ (zmm0 | zmm9)
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
+; AVX512F-NEXT:    vpand %ymm2, %ymm1, %ymm5
+; AVX512F-NEXT:    vpshufb %ymm5, %ymm4, %ymm5
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm6
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm4, %ymm4
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vporq %zmm0, %zmm4, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: clmulr_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm2, %ymm3, %ymm5
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX512VL-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpshufb %ymm5, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpsrlw $4, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpand %ymm2, %ymm3, %ymm6
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX512VL-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpshufb %ymm6, %ymm3, %ymm6
+; AVX512VL-NEXT:    vpor %ymm6, %ymm5, %ymm5
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm7
+; AVX512VL-NEXT:    vpsrlw $4, %ymm7, %ymm6
+; AVX512VL-NEXT:    vpand %ymm2, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpshufb %ymm6, %ymm3, %ymm6
+; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm8
+; AVX512VL-NEXT:    vpand %ymm2, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpshufb %ymm8, %ymm3, %ymm8
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm9
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm10, %ymm6
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm11
+; AVX512VL-NEXT:    vpshufb %ymm11, %ymm4, %ymm11
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm11, %ymm0
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm9, %ymm11
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm11, %zmm11
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX512VL-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512VL-NEXT:    vpmaddubsw %ymm9, %ymm0, %ymm9
+; AVX512VL-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpandn %ymm10, %ymm6, %ymm10
+; AVX512VL-NEXT:    vpmaddubsw %ymm10, %ymm5, %ymm10
+; AVX512VL-NEXT:    vpsllw $8, %ymm10, %ymm10
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 | (zmm11 & zmm6)
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm10
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm10, %ymm11
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm11, %ymm12
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm10, %ymm13
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512VL-NEXT:    vpandq %zmm6, %zmm12, %zmm12
+; AVX512VL-NEXT:    vpandn %ymm10, %ymm6, %ymm10
+; AVX512VL-NEXT:    vpmaddubsw %ymm10, %ymm0, %ymm10
+; AVX512VL-NEXT:    vpsllw $8, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpandn %ymm11, %ymm6, %ymm11
+; AVX512VL-NEXT:    vpmaddubsw %ymm11, %ymm5, %ymm11
+; AVX512VL-NEXT:    vpsllw $8, %ymm11, %ymm11
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm10 = zmm9 ^ (zmm10 | zmm12)
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm9
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm9, %ymm11
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm11, %ymm12
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm9, %ymm13
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512VL-NEXT:    vpandq %zmm6, %zmm12, %zmm12
+; AVX512VL-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512VL-NEXT:    vpmaddubsw %ymm9, %ymm0, %ymm9
+; AVX512VL-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpandn %ymm11, %ymm6, %ymm11
+; AVX512VL-NEXT:    vpmaddubsw %ymm11, %ymm5, %ymm11
+; AVX512VL-NEXT:    vpsllw $8, %ymm11, %ymm11
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm11, %zmm9, %zmm9
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm10 ^ (zmm9 | zmm12)
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm10, %ymm11
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm8, %ymm12
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm11, %zmm12, %zmm11
+; AVX512VL-NEXT:    vpandq %zmm6, %zmm11, %zmm11
+; AVX512VL-NEXT:    vpandn %ymm8, %ymm6, %ymm8
+; AVX512VL-NEXT:    vpmaddubsw %ymm8, %ymm0, %ymm8
+; AVX512VL-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpandn %ymm10, %ymm6, %ymm10
+; AVX512VL-NEXT:    vpmaddubsw %ymm10, %ymm5, %ymm10
+; AVX512VL-NEXT:    vpsllw $8, %ymm10, %ymm10
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm9 ^ (zmm8 | zmm11)
+; AVX512VL-NEXT:    vpand %ymm2, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpshufb %ymm7, %ymm4, %ymm7
+; AVX512VL-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpshufb %ymm1, %ymm4, %ymm1
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm9, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm11
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512VL-NEXT:    vpandq %zmm6, %zmm10, %zmm10
+; AVX512VL-NEXT:    vpandn %ymm7, %ymm6, %ymm7
+; AVX512VL-NEXT:    vpmaddubsw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vpsllw $8, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512VL-NEXT:    vpmaddubsw %ymm9, %ymm5, %ymm9
+; AVX512VL-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm7 = zmm8 ^ (zmm7 | zmm10)
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm8, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm9, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm8, %ymm11
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512VL-NEXT:    vpandq %zmm6, %zmm10, %zmm10
+; AVX512VL-NEXT:    vpandn %ymm8, %ymm6, %ymm8
+; AVX512VL-NEXT:    vpmaddubsw %ymm8, %ymm0, %ymm8
+; AVX512VL-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512VL-NEXT:    vpmaddubsw %ymm9, %ymm5, %ymm9
+; AVX512VL-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm7 ^ (zmm8 | zmm10)
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm9, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm11
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512VL-NEXT:    vpandq %zmm6, %zmm10, %zmm10
+; AVX512VL-NEXT:    vpandn %ymm7, %ymm6, %ymm7
+; AVX512VL-NEXT:    vpmaddubsw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vpsllw $8, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512VL-NEXT:    vpmaddubsw %ymm9, %ymm5, %ymm9
+; AVX512VL-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm7 = zmm8 ^ (zmm7 | zmm10)
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm8
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm8, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm1, %ymm0, %ymm10
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm10, %zmm9
+; AVX512VL-NEXT:    vpandq %zmm6, %zmm9, %zmm9
+; AVX512VL-NEXT:    vpandn %ymm1, %ymm6, %ymm1
+; AVX512VL-NEXT:    vpmaddubsw %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpandn %ymm8, %ymm6, %ymm1
+; AVX512VL-NEXT:    vpmaddubsw %ymm1, %ymm5, %ymm1
+; AVX512VL-NEXT:    vpsllw $8, %ymm1, %ymm1
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm7 ^ (zmm0 | zmm9)
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
+; AVX512VL-NEXT:    vpand %ymm2, %ymm1, %ymm5
+; AVX512VL-NEXT:    vpshufb %ymm5, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm6
+; AVX512VL-NEXT:    vpshufb %ymm6, %ymm4, %ymm4
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm5, %zmm4, %zmm4
+; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vporq %zmm0, %zmm4, %zmm0
+; AVX512VL-NEXT:    retq
+  %a.ext = zext <64 x i8> %a to <64 x i16>
+  %b.ext = zext <64 x i8> %b to <64 x i16>
+  %clmul = call <64 x i16> @llvm.clmul.v64i16(<64 x i16> %a.ext, <64 x i16> %b.ext)
+  %res.ext = lshr <64 x i16> %clmul, splat (i16 7)
+  %res = trunc <64 x i16> %res.ext to <64 x i8>
+  ret <64 x i8> %res
+}
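
[Annotation, not part of the patch: the clmulr tests all follow the pattern visible in the IR body above: zero-extend each element to twice its width, take the (low-half) carry-less multiply, then shift right by width-1 so the result holds bits [2N-2:N-1] of the full product. A minimal scalar sketch of the same pattern, assuming the scalar overload of the same llvm.clmul intrinsic the vector tests use; the function name is illustrative only.

define i8 @clmulr_i8_sketch(i8 %a, i8 %b) {
  %a.ext = zext i8 %a to i16
  %b.ext = zext i8 %b to i16
  ; the carry-less product of two 8-bit values has degree <= 14,
  ; so it fits in the low 15 bits of an i16
  %clmul = call i16 @llvm.clmul.i16(i16 %a.ext, i16 %b.ext)
  ; width-1 = 7 selects the "reversed" half, bits [14:7] of the product
  %res.ext = lshr i16 %clmul, 7
  %res = trunc i16 %res.ext to i8
  ret i8 %res
}
declare i16 @llvm.clmul.i16(i16, i16)
]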
+
+define <32 x i16> @clmulr_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
+; AVX512F-LABEL: clmulr_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm2, %ymm5
+; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm2, %ymm5, %ymm6
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX512F-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm1, %ymm7
+; AVX512F-NEXT:    vpand %ymm2, %ymm7, %ymm1
+; AVX512F-NEXT:    vpshufb %ymm1, %ymm4, %ymm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm6
+; AVX512F-NEXT:    vpsrlw $4, %ymm5, %ymm1
+; AVX512F-NEXT:    vpand %ymm2, %ymm1, %ymm5
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpshufb %ymm5, %ymm1, %ymm5
+; AVX512F-NEXT:    vpsrlw $4, %ymm7, %ymm7
+; AVX512F-NEXT:    vpand %ymm2, %ymm7, %ymm7
+; AVX512F-NEXT:    vpshufb %ymm7, %ymm1, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm5
+; AVX512F-NEXT:    vporq %zmm5, %zmm6, %zmm5
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm6
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm6, %ymm6
+; AVX512F-NEXT:    vpand %ymm2, %ymm6, %ymm9
+; AVX512F-NEXT:    vpshufb %ymm9, %ymm4, %ymm9
+; AVX512F-NEXT:    vpsrlw $4, %ymm6, %ymm6
+; AVX512F-NEXT:    vpand %ymm2, %ymm6, %ymm6
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm1, %ymm6
+; AVX512F-NEXT:    vpor %ymm6, %ymm9, %ymm6
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm9
+; AVX512F-NEXT:    vpshufb %ymm9, %ymm4, %ymm9
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm9, %ymm0
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm8, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512F-NEXT:    vpxorq %zmm7, %zmm8, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm8, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm9
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm9, %ymm9
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 ^ zmm7 ^ zmm8
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm8 ^ zmm9 ^ zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm9
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm9, %ymm9
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 ^ zmm8 ^ zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm8 ^ zmm9 ^ zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm9
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm9, %ymm9
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 ^ zmm8 ^ zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm8 ^ zmm9 ^ zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm5
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm5, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm9, %ymm6
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm0 ^ zmm8 ^ zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm5
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm5, %ymm5
+; AVX512F-NEXT:    vpand %ymm2, %ymm5, %ymm6
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm3
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm3, %zmm3
+; AVX512F-NEXT:    vpsrlw $4, %ymm5, %ymm4
+; AVX512F-NEXT:    vpand %ymm2, %ymm4, %ymm4
+; AVX512F-NEXT:    vpshufb %ymm4, %ymm1, %ymm4
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512F-NEXT:    vporq %zmm0, %zmm3, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: clmulr_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
+; AVX512VL-NEXT:    vpshufb %ymm3, %ymm2, %ymm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm2, %ymm5, %ymm6
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX512VL-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
+; AVX512VL-NEXT:    vpshufb %ymm3, %ymm1, %ymm7
+; AVX512VL-NEXT:    vpand %ymm2, %ymm7, %ymm1
+; AVX512VL-NEXT:    vpshufb %ymm1, %ymm4, %ymm1
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm6
+; AVX512VL-NEXT:    vpsrlw $4, %ymm5, %ymm1
+; AVX512VL-NEXT:    vpand %ymm2, %ymm1, %ymm5
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX512VL-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpshufb %ymm5, %ymm1, %ymm5
+; AVX512VL-NEXT:    vpsrlw $4, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpand %ymm2, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpshufb %ymm7, %ymm1, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm5
+; AVX512VL-NEXT:    vporq %zmm5, %zmm6, %zmm5
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm6
+; AVX512VL-NEXT:    vpshufb %ymm3, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpand %ymm2, %ymm6, %ymm9
+; AVX512VL-NEXT:    vpshufb %ymm9, %ymm4, %ymm9
+; AVX512VL-NEXT:    vpsrlw $4, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpand %ymm2, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpshufb %ymm6, %ymm1, %ymm6
+; AVX512VL-NEXT:    vpor %ymm6, %ymm9, %ymm6
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm9
+; AVX512VL-NEXT:    vpshufb %ymm9, %ymm4, %ymm9
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm9, %ymm0
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm8, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpxorq %zmm7, %zmm8, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm8, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm9
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm9, %ymm9
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 ^ zmm7 ^ zmm8
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm8 ^ zmm9 ^ zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm9
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm9, %ymm9
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 ^ zmm8 ^ zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm8 ^ zmm9 ^ zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm9
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm9, %ymm9
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 ^ zmm8 ^ zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm8 ^ zmm9 ^ zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm5
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm5, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm9, %ymm6
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm0 ^ zmm8 ^ zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm5
+; AVX512VL-NEXT:    vpshufb %ymm3, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpand %ymm2, %ymm5, %ymm6
+; AVX512VL-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
+; AVX512VL-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm3, %zmm3
+; AVX512VL-NEXT:    vpsrlw $4, %ymm5, %ymm4
+; AVX512VL-NEXT:    vpand %ymm2, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpshufb %ymm4, %ymm1, %ymm4
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512VL-NEXT:    vporq %zmm0, %zmm3, %zmm0
+; AVX512VL-NEXT:    retq
+  %a.ext = zext <32 x i16> %a to <32 x i32>
+  %b.ext = zext <32 x i16> %b to <32 x i32>
+  %clmul = call <32 x i32> @llvm.clmul.v32i32(<32 x i32> %a.ext, <32 x i32> %b.ext)
+  %res.ext = lshr <32 x i32> %clmul, splat (i32 15)
+  %res = trunc <32 x i32> %res.ext to <32 x i16>
+  ret <32 x i16> %res
+}
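
[Annotation, not part of the patch: with no reversed multiply available, the AVX512 sequences checked above appear to rely on the identity clmulr(a, b) == bitreverse(clmul(bitreverse(a), bitreverse(b))). The vpshufb nibble LUTs ([0,128,64,192,...] and [0,8,4,12,...] are the two halves of a byte bit-reversal, plus the [1,0,3,2,...] byte swap for i16 elements) perform the reversals, and the repeated masked vpmullw rounds accumulate the forward carry-less multiply one multiplier bit at a time. The identity itself, as a scalar sketch with illustrative names:

define i16 @clmulr_via_bitreverse_sketch(i16 %a, i16 %b) {
  %ra = call i16 @llvm.bitreverse.i16(i16 %a)
  %rb = call i16 @llvm.bitreverse.i16(i16 %b)
  ; forward low-half clmul of the reversed inputs...
  %m = call i16 @llvm.clmul.i16(i16 %ra, i16 %rb)
  ; ...reversed again recovers bits [30:15] of the full product
  %r = call i16 @llvm.bitreverse.i16(i16 %m)
  ret i16 %r
}
declare i16 @llvm.bitreverse.i16(i16)
declare i16 @llvm.clmul.i16(i16, i16)
]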
+
+define <16 x i32> @clmulr_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
+; AVX512-LABEL: clmulr_v16i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vextracti32x4 $3, %zmm1, %xmm2
+; AVX512-NEXT:    vpxor %xmm8, %xmm8, %xmm8
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm4 = xmm2[0],xmm8[1,2,3]
+; AVX512-NEXT:    vextracti32x4 $3, %zmm0, %xmm3
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm5 = xmm3[0],xmm8[1,2,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm4, %xmm5, %xmm4
+; AVX512-NEXT:    vmovq %xmm4, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vmovd %eax, %xmm4
+; AVX512-NEXT:    vpsrlq $32, %xmm2, %xmm5
+; AVX512-NEXT:    vpsrlq $32, %xmm3, %xmm6
+; AVX512-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX512-NEXT:    vmovq %xmm5, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm4, %xmm4
+; AVX512-NEXT:    vpxor %xmm9, %xmm9, %xmm9
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm5 = xmm2[2],xmm9[2],xmm2[3],xmm9[3]
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm6 = xmm3[2],xmm9[2],xmm3[3],xmm9[3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX512-NEXT:    vmovq %xmm5, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm4, %xmm4
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm6 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX512-NEXT:    vmovq %xmm5, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm4, %xmm6
+; AVX512-NEXT:    vextracti32x4 $2, %zmm1, %xmm4
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm7 = xmm4[0],xmm8[1,2,3]
+; AVX512-NEXT:    vextracti32x4 $2, %zmm0, %xmm5
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm10 = xmm5[0],xmm8[1,2,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm7, %xmm10, %xmm7
+; AVX512-NEXT:    vmovq %xmm7, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vmovd %eax, %xmm7
+; AVX512-NEXT:    vpsrlq $32, %xmm4, %xmm10
+; AVX512-NEXT:    vpsrlq $32, %xmm5, %xmm11
+; AVX512-NEXT:    vpclmulqdq $0, %xmm10, %xmm11, %xmm10
+; AVX512-NEXT:    vmovq %xmm10, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm7, %xmm7
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm10 = xmm4[2],xmm9[2],xmm4[3],xmm9[3]
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm11 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm10, %xmm11, %xmm10
+; AVX512-NEXT:    vmovq %xmm10, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm7, %xmm7
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm10 = xmm4[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm11 = xmm5[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpclmulqdq $0, %xmm10, %xmm11, %xmm10
+; AVX512-NEXT:    vmovq %xmm10, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm7, %xmm7
+; AVX512-NEXT:    vinserti128 $1, %xmm6, %ymm7, %ymm10
+; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm6
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm11 = xmm6[0],xmm8[1,2,3]
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm7
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm12 = xmm7[0],xmm8[1,2,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm11, %xmm12, %xmm11
+; AVX512-NEXT:    vmovq %xmm11, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vmovd %eax, %xmm11
+; AVX512-NEXT:    vpsrlq $32, %xmm6, %xmm12
+; AVX512-NEXT:    vpsrlq $32, %xmm7, %xmm13
+; AVX512-NEXT:    vpclmulqdq $0, %xmm12, %xmm13, %xmm12
+; AVX512-NEXT:    vmovq %xmm12, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm11, %xmm11
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm12 = xmm6[2],xmm9[2],xmm6[3],xmm9[3]
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm13 = xmm7[2],xmm9[2],xmm7[3],xmm9[3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm12, %xmm13, %xmm12
+; AVX512-NEXT:    vmovq %xmm12, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm11, %xmm11
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm12 = xmm6[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm13 = xmm7[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpclmulqdq $0, %xmm12, %xmm13, %xmm12
+; AVX512-NEXT:    vmovq %xmm12, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm11, %xmm11
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm12 = xmm1[0],xmm8[1,2,3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm8 = xmm0[0],xmm8[1,2,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm12, %xmm8, %xmm8
+; AVX512-NEXT:    vmovq %xmm8, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vmovd %eax, %xmm8
+; AVX512-NEXT:    vpsrlq $32, %xmm1, %xmm12
+; AVX512-NEXT:    vpsrlq $32, %xmm0, %xmm13
+; AVX512-NEXT:    vpclmulqdq $0, %xmm12, %xmm13, %xmm12
+; AVX512-NEXT:    vmovq %xmm12, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm8, %xmm8
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm12 = xmm1[2],xmm9[2],xmm1[3],xmm9[3]
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm0[2],xmm9[2],xmm0[3],xmm9[3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm12, %xmm9, %xmm9
+; AVX512-NEXT:    vmovq %xmm9, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm8, %xmm8
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm9 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm12 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpclmulqdq $0, %xmm9, %xmm12, %xmm9
+; AVX512-NEXT:    vmovq %xmm9, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm8, %xmm8
+; AVX512-NEXT:    vinserti128 $1, %xmm11, %ymm8, %ymm8
+; AVX512-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512-NEXT:    vpaddd %zmm8, %zmm8, %zmm8
+; AVX512-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm9
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm10 = xmm2[1,1,1,1]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm11 = xmm3[1,1,1,1]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm10, %xmm11, %xmm10
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm2, %xmm3, %xmm10
+; AVX512-NEXT:    vmovq %xmm10, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm9, %xmm9
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm2, %xmm3, %xmm2
+; AVX512-NEXT:    vmovq %xmm2, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm9, %xmm2
+; AVX512-NEXT:    vpclmulqdq $0, %xmm4, %xmm5, %xmm3
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm9 = xmm4[1,1,1,1]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm10 = xmm5[1,1,1,1]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm9, %xmm10, %xmm9
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm4, %xmm5, %xmm9
+; AVX512-NEXT:    vmovq %xmm9, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm3, %xmm3
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm4 = xmm4[3,3,3,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm5[3,3,3,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm4, %xmm5, %xmm4
+; AVX512-NEXT:    vmovq %xmm4, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm3, %xmm3
+; AVX512-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm3
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[1,1,1,1]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm7[1,1,1,1]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm4, %xmm5, %xmm4
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm6, %xmm7, %xmm4
+; AVX512-NEXT:    vmovq %xmm4, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm3, %xmm3
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm4 = xmm6[3,3,3,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm7[3,3,3,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm4, %xmm5, %xmm4
+; AVX512-NEXT:    vmovq %xmm4, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm3, %xmm3
+; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm4
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm6 = xmm0[1,1,1,1]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm5, %xmm6, %xmm5
+; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; AVX512-NEXT:    vpclmulqdq $17, %xmm1, %xmm0, %xmm5
+; AVX512-NEXT:    vmovq %xmm5, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm4, %xmm4
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[3,3,3,3]
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm4, %xmm0
+; AVX512-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm0
+; AVX512-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; AVX512-NEXT:    vpsrld $31, %zmm0, %zmm0
+; AVX512-NEXT:    vpord %zmm8, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %a.ext = zext <16 x i32> %a to <16 x i64>
+  %b.ext = zext <16 x i32> %b to <16 x i64>
+  %clmul = call <16 x i64> @llvm.clmul.v16i64(<16 x i64> %a.ext, <16 x i64> %b.ext)
+  %res.ext = lshr <16 x i64> %clmul, splat (i64 31)
+  %res = trunc <16 x i64> %res.ext to <16 x i32>
+  ret <16 x i32> %res
+}
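
[Annotation, not part of the patch: for i32 elements the lowering can use PCLMULQDQ directly. The blend/shift/unpack shuffles above place each dword pair into its own qword lane, vpclmulqdq forms the 64-bit carry-less products, and the required >>31 is then assembled from the two 32-bit halves of each product (vpaddd doubling the high dwords, vpsrld $31 extracting bit 31 of the low dwords, vpord combining). A per-product sketch of that recombination, where %p is assumed to hold one 64-bit product (hypothetical helper, not in the patch):

define i32 @clmulr_i32_recombine_sketch(i64 %p) {
  %hi = lshr i64 %p, 32
  %hi32 = trunc i64 %hi to i32
  %hi.dbl = shl i32 %hi32, 1        ; vpaddd %zmm8, %zmm8 doubles, i.e. shl 1
  %lo32 = trunc i64 %p to i32
  %bit31 = lshr i32 %lo32, 31       ; vpsrld $31
  %r = or i32 %hi.dbl, %bit31       ; vpord: bits [62:31] of the product
  ret i32 %r
}
]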
+
+define <8 x i64> @clmulr_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
+; AVX512-LABEL: clmulr_v8i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpclmulqdq $17, %zmm1, %zmm0, %zmm2
+; AVX512-NEXT:    vpclmulqdq $0, %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vpunpckhqdq {{.*#+}} zmm1 = zmm0[1],zmm2[1],zmm0[3],zmm2[3],zmm0[5],zmm2[5],zmm0[7],zmm2[7]
+; AVX512-NEXT:    vpaddq %zmm1, %zmm1, %zmm1
+; AVX512-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm2[0],zmm0[2],zmm2[2],zmm0[4],zmm2[4],zmm0[6],zmm2[6]
+; AVX512-NEXT:    vpsrlq $63, %zmm0, %zmm0
+; AVX512-NEXT:    vporq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %a.ext = zext <8 x i64> %a to <8 x i128>
+  %b.ext = zext <8 x i64> %b to <8 x i128>
+  %clmul = call <8 x i128> @llvm.clmul.v8i128(<8 x i128> %a.ext, <8 x i128> %b.ext)
+  %res.ext = lshr <8 x i128> %clmul, splat (i128 63)
+  %res = trunc <8 x i128> %res.ext to <8 x i64>
+  ret <8 x i64> %res
+}
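
[Annotation, not part of the patch: for i64 elements the mapping onto the hardware is direct. vpclmulqdq $0/$17 produce the low-qword and high-qword products of each 128-bit lane, the vpunpcklqdq/vpunpckhqdq pair gathers the 64-bit halves of each product, and the checked tail computes (hi << 1) | (lo >> 63), i.e. bits [126:63] of the 127-bit product. As a per-element sketch, where %lo and %hi are the assumed product halves (illustrative only):

define i64 @clmulr_i64_recombine_sketch(i64 %lo, i64 %hi) {
  %hi.dbl = shl i64 %hi, 1          ; vpaddq %zmm1, %zmm1 doubles, i.e. shl 1
  %bit63 = lshr i64 %lo, 63         ; vpsrlq $63
  %r = or i64 %hi.dbl, %bit63       ; vporq: bits [126:63] of the product
  ret i64 %r
}
]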
+
+define <64 x i8> @clmulh_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
+; AVX512F-LABEL: clmulh_v64i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm4
+; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm2, %ymm4, %ymm5
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX512F-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpshufb %ymm5, %ymm3, %ymm5
+; AVX512F-NEXT:    vpsrlw $4, %ymm4, %ymm4
+; AVX512F-NEXT:    vpand %ymm2, %ymm4, %ymm6
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX512F-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
+; AVX512F-NEXT:    vpor %ymm6, %ymm5, %ymm5
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm7
+; AVX512F-NEXT:    vpsrlw $4, %ymm7, %ymm6
+; AVX512F-NEXT:    vpand %ymm2, %ymm6, %ymm6
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
+; AVX512F-NEXT:    vpsrlw $4, %ymm1, %ymm8
+; AVX512F-NEXT:    vpand %ymm2, %ymm8, %ymm8
+; AVX512F-NEXT:    vpshufb %ymm8, %ymm4, %ymm8
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm8
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm9
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm10, %ymm6
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm11
+; AVX512F-NEXT:    vpshufb %ymm11, %ymm3, %ymm11
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm11, %ymm0
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm9, %ymm11
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm11, %zmm11
+; AVX512F-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX512F-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512F-NEXT:    vpmaddubsw %ymm9, %ymm0, %ymm9
+; AVX512F-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512F-NEXT:    vpandn %ymm10, %ymm6, %ymm10
+; AVX512F-NEXT:    vpmaddubsw %ymm10, %ymm5, %ymm10
+; AVX512F-NEXT:    vpsllw $8, %ymm10, %ymm10
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 | (zmm11 & zmm6)
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm10
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm10, %ymm11
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm11, %ymm12
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm10, %ymm13
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512F-NEXT:    vpandq %zmm6, %zmm12, %zmm12
+; AVX512F-NEXT:    vpandn %ymm10, %ymm6, %ymm10
+; AVX512F-NEXT:    vpmaddubsw %ymm10, %ymm0, %ymm10
+; AVX512F-NEXT:    vpsllw $8, %ymm10, %ymm10
+; AVX512F-NEXT:    vpandn %ymm11, %ymm6, %ymm11
+; AVX512F-NEXT:    vpmaddubsw %ymm11, %ymm5, %ymm11
+; AVX512F-NEXT:    vpsllw $8, %ymm11, %ymm11
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm10 = zmm9 ^ (zmm10 | zmm12)
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm9
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm9, %ymm11
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm11, %ymm12
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm9, %ymm13
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512F-NEXT:    vpandq %zmm6, %zmm12, %zmm12
+; AVX512F-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512F-NEXT:    vpmaddubsw %ymm9, %ymm0, %ymm9
+; AVX512F-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512F-NEXT:    vpandn %ymm11, %ymm6, %ymm11
+; AVX512F-NEXT:    vpmaddubsw %ymm11, %ymm5, %ymm11
+; AVX512F-NEXT:    vpsllw $8, %ymm11, %ymm11
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm11, %zmm9, %zmm9
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm10 ^ (zmm9 | zmm12)
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm10, %ymm11
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm8, %ymm12
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm11, %zmm12, %zmm11
+; AVX512F-NEXT:    vpandq %zmm6, %zmm11, %zmm11
+; AVX512F-NEXT:    vpandn %ymm8, %ymm6, %ymm8
+; AVX512F-NEXT:    vpmaddubsw %ymm8, %ymm0, %ymm8
+; AVX512F-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX512F-NEXT:    vpandn %ymm10, %ymm6, %ymm10
+; AVX512F-NEXT:    vpmaddubsw %ymm10, %ymm5, %ymm10
+; AVX512F-NEXT:    vpsllw $8, %ymm10, %ymm10
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm9 ^ (zmm8 | zmm11)
+; AVX512F-NEXT:    vpand %ymm2, %ymm7, %ymm7
+; AVX512F-NEXT:    vpshufb %ymm7, %ymm3, %ymm7
+; AVX512F-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512F-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm1
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm9, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm11
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512F-NEXT:    vpandq %zmm6, %zmm10, %zmm10
+; AVX512F-NEXT:    vpandn %ymm7, %ymm6, %ymm7
+; AVX512F-NEXT:    vpmaddubsw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vpsllw $8, %ymm7, %ymm7
+; AVX512F-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512F-NEXT:    vpmaddubsw %ymm9, %ymm5, %ymm9
+; AVX512F-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm7 = zmm8 ^ (zmm7 | zmm10)
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm8, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm9, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm8, %ymm11
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512F-NEXT:    vpandq %zmm6, %zmm10, %zmm10
+; AVX512F-NEXT:    vpandn %ymm8, %ymm6, %ymm8
+; AVX512F-NEXT:    vpmaddubsw %ymm8, %ymm0, %ymm8
+; AVX512F-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX512F-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512F-NEXT:    vpmaddubsw %ymm9, %ymm5, %ymm9
+; AVX512F-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm7 ^ (zmm8 | zmm10)
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm9, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm11
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512F-NEXT:    vpandq %zmm6, %zmm10, %zmm10
+; AVX512F-NEXT:    vpandn %ymm7, %ymm6, %ymm7
+; AVX512F-NEXT:    vpmaddubsw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vpsllw $8, %ymm7, %ymm7
+; AVX512F-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512F-NEXT:    vpmaddubsw %ymm9, %ymm5, %ymm9
+; AVX512F-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm7 = zmm8 ^ (zmm7 | zmm10)
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm8
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm8, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm1, %ymm0, %ymm10
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm10, %zmm9
+; AVX512F-NEXT:    vpandq %zmm6, %zmm9, %zmm9
+; AVX512F-NEXT:    vpandn %ymm1, %ymm6, %ymm1
+; AVX512F-NEXT:    vpmaddubsw %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpsllw $8, %ymm0, %ymm0
+; AVX512F-NEXT:    vpandn %ymm8, %ymm6, %ymm1
+; AVX512F-NEXT:    vpmaddubsw %ymm1, %ymm5, %ymm1
+; AVX512F-NEXT:    vpsllw $8, %ymm1, %ymm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm7 ^ (zmm0 | zmm9)
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm1
+; AVX512F-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm5
+; AVX512F-NEXT:    vpand %ymm2, %ymm5, %ymm5
+; AVX512F-NEXT:    vpshufb %ymm5, %ymm4, %ymm5
+; AVX512F-NEXT:    vpor %ymm5, %ymm1, %ymm1
+; AVX512F-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm5
+; AVX512F-NEXT:    vpshufb %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: clmulh_v64i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm4
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm2, %ymm4, %ymm5
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX512VL-NEXT:    # ymm3 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpshufb %ymm5, %ymm3, %ymm5
+; AVX512VL-NEXT:    vpsrlw $4, %ymm4, %ymm4
+; AVX512VL-NEXT:    vpand %ymm2, %ymm4, %ymm6
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX512VL-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
+; AVX512VL-NEXT:    vpor %ymm6, %ymm5, %ymm5
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm7
+; AVX512VL-NEXT:    vpsrlw $4, %ymm7, %ymm6
+; AVX512VL-NEXT:    vpand %ymm2, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
+; AVX512VL-NEXT:    vpsrlw $4, %ymm1, %ymm8
+; AVX512VL-NEXT:    vpand %ymm2, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpshufb %ymm8, %ymm4, %ymm8
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm9
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm10, %ymm6
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm11
+; AVX512VL-NEXT:    vpshufb %ymm11, %ymm3, %ymm11
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm11, %ymm0
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm9, %ymm11
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm11, %zmm11
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} zmm6 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX512VL-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512VL-NEXT:    vpmaddubsw %ymm9, %ymm0, %ymm9
+; AVX512VL-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpandn %ymm10, %ymm6, %ymm10
+; AVX512VL-NEXT:    vpmaddubsw %ymm10, %ymm5, %ymm10
+; AVX512VL-NEXT:    vpsllw $8, %ymm10, %ymm10
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 | (zmm11 & zmm6)
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm10
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm10, %ymm11
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm11, %ymm12
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm10, %ymm13
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512VL-NEXT:    vpandq %zmm6, %zmm12, %zmm12
+; AVX512VL-NEXT:    vpandn %ymm10, %ymm6, %ymm10
+; AVX512VL-NEXT:    vpmaddubsw %ymm10, %ymm0, %ymm10
+; AVX512VL-NEXT:    vpsllw $8, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpandn %ymm11, %ymm6, %ymm11
+; AVX512VL-NEXT:    vpmaddubsw %ymm11, %ymm5, %ymm11
+; AVX512VL-NEXT:    vpsllw $8, %ymm11, %ymm11
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm11, %zmm10, %zmm10
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm10 = zmm9 ^ (zmm10 | zmm12)
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm9
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm9, %ymm11
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm11, %ymm12
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm9, %ymm13
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512VL-NEXT:    vpandq %zmm6, %zmm12, %zmm12
+; AVX512VL-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512VL-NEXT:    vpmaddubsw %ymm9, %ymm0, %ymm9
+; AVX512VL-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpandn %ymm11, %ymm6, %ymm11
+; AVX512VL-NEXT:    vpmaddubsw %ymm11, %ymm5, %ymm11
+; AVX512VL-NEXT:    vpsllw $8, %ymm11, %ymm11
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm11, %zmm9, %zmm9
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm10 ^ (zmm9 | zmm12)
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm8, %zmm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm10, %ymm11
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm8, %ymm12
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm11, %zmm12, %zmm11
+; AVX512VL-NEXT:    vpandq %zmm6, %zmm11, %zmm11
+; AVX512VL-NEXT:    vpandn %ymm8, %ymm6, %ymm8
+; AVX512VL-NEXT:    vpmaddubsw %ymm8, %ymm0, %ymm8
+; AVX512VL-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpandn %ymm10, %ymm6, %ymm10
+; AVX512VL-NEXT:    vpmaddubsw %ymm10, %ymm5, %ymm10
+; AVX512VL-NEXT:    vpsllw $8, %ymm10, %ymm10
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm9 ^ (zmm8 | zmm11)
+; AVX512VL-NEXT:    vpand %ymm2, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpshufb %ymm7, %ymm3, %ymm7
+; AVX512VL-NEXT:    vpand %ymm2, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm7, %zmm1, %zmm1
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm9, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm11
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512VL-NEXT:    vpandq %zmm6, %zmm10, %zmm10
+; AVX512VL-NEXT:    vpandn %ymm7, %ymm6, %ymm7
+; AVX512VL-NEXT:    vpmaddubsw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vpsllw $8, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512VL-NEXT:    vpmaddubsw %ymm9, %ymm5, %ymm9
+; AVX512VL-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm7 = zmm8 ^ (zmm7 | zmm10)
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm8, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm9, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm8, %ymm11
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512VL-NEXT:    vpandq %zmm6, %zmm10, %zmm10
+; AVX512VL-NEXT:    vpandn %ymm8, %ymm6, %ymm8
+; AVX512VL-NEXT:    vpmaddubsw %ymm8, %ymm0, %ymm8
+; AVX512VL-NEXT:    vpsllw $8, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512VL-NEXT:    vpmaddubsw %ymm9, %ymm5, %ymm9
+; AVX512VL-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm7 ^ (zmm8 | zmm10)
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm9, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm11
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm11, %zmm10
+; AVX512VL-NEXT:    vpandq %zmm6, %zmm10, %zmm10
+; AVX512VL-NEXT:    vpandn %ymm7, %ymm6, %ymm7
+; AVX512VL-NEXT:    vpmaddubsw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vpsllw $8, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpandn %ymm9, %ymm6, %ymm9
+; AVX512VL-NEXT:    vpmaddubsw %ymm9, %ymm5, %ymm9
+; AVX512VL-NEXT:    vpsllw $8, %ymm9, %ymm9
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm7 = zmm8 ^ (zmm7 | zmm10)
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm8
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm8, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm1, %ymm0, %ymm10
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm10, %zmm9
+; AVX512VL-NEXT:    vpandq %zmm6, %zmm9, %zmm9
+; AVX512VL-NEXT:    vpandn %ymm1, %ymm6, %ymm1
+; AVX512VL-NEXT:    vpmaddubsw %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpsllw $8, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpandn %ymm8, %ymm6, %ymm1
+; AVX512VL-NEXT:    vpmaddubsw %ymm1, %ymm5, %ymm1
+; AVX512VL-NEXT:    vpsllw $8, %ymm1, %ymm1
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm7 ^ (zmm0 | zmm9)
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm1
+; AVX512VL-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm5
+; AVX512VL-NEXT:    vpand %ymm2, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpshufb %ymm5, %ymm4, %ymm5
+; AVX512VL-NEXT:    vpor %ymm5, %ymm1, %ymm1
+; AVX512VL-NEXT:    vpsrlw $1, %ymm1, %ymm1
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm5
+; AVX512VL-NEXT:    vpshufb %ymm5, %ymm3, %ymm3
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX512VL-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512VL-NEXT:    retq
+  %a.ext = zext <64 x i8> %a to <64 x i16>
+  %b.ext = zext <64 x i8> %b to <64 x i16>
+  %clmul = call <64 x i16> @llvm.clmul.v64i16(<64 x i16> %a.ext, <64 x i16> %b.ext)
+  %res.ext = lshr <64 x i16> %clmul, splat (i16 8)
+  %res = trunc <64 x i16> %res.ext to <64 x i8>
+  ret <64 x i8> %res
+}
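
[Annotation, not part of the patch: clmulh differs from clmulr only in the shift amount, using the full element width rather than width-1, so it selects the true upper half of the product. That is why the checks above end with a vpsrlw $1 after the closing bit reversal (masked back to byte granularity by the final vpandd): clmulh equals clmulr shifted right once. Scalar sketch under the same assumptions as before:

define i8 @clmulh_i8_sketch(i8 %a, i8 %b) {
  %a.ext = zext i8 %a to i16
  %b.ext = zext i8 %b to i16
  %p = call i16 @llvm.clmul.i16(i16 %a.ext, i16 %b.ext)
  ; full width = 8 selects bits [15:8], one bit above the clmulr half
  %hi = lshr i16 %p, 8
  %res = trunc i16 %hi to i8
  ret i8 %res
}
declare i16 @llvm.clmul.i16(i16, i16)
]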
+
+define <32 x i16> @clmulh_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
+; AVX512F-LABEL: clmulh_v32i16:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512F-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm2, %ymm5
+; AVX512F-NEXT:    vpbroadcastb {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512F-NEXT:    vpand %ymm2, %ymm5, %ymm6
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX512F-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm1, %ymm7
+; AVX512F-NEXT:    vpand %ymm2, %ymm7, %ymm1
+; AVX512F-NEXT:    vpshufb %ymm1, %ymm4, %ymm1
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm6
+; AVX512F-NEXT:    vpsrlw $4, %ymm5, %ymm1
+; AVX512F-NEXT:    vpand %ymm2, %ymm1, %ymm5
+; AVX512F-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX512F-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512F-NEXT:    vpshufb %ymm5, %ymm1, %ymm5
+; AVX512F-NEXT:    vpsrlw $4, %ymm7, %ymm7
+; AVX512F-NEXT:    vpand %ymm2, %ymm7, %ymm7
+; AVX512F-NEXT:    vpshufb %ymm7, %ymm1, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm5
+; AVX512F-NEXT:    vporq %zmm5, %zmm6, %zmm5
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm6
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm6, %ymm6
+; AVX512F-NEXT:    vpand %ymm2, %ymm6, %ymm9
+; AVX512F-NEXT:    vpshufb %ymm9, %ymm4, %ymm9
+; AVX512F-NEXT:    vpsrlw $4, %ymm6, %ymm6
+; AVX512F-NEXT:    vpand %ymm2, %ymm6, %ymm6
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm1, %ymm6
+; AVX512F-NEXT:    vpor %ymm6, %ymm9, %ymm6
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm9
+; AVX512F-NEXT:    vpshufb %ymm9, %ymm4, %ymm9
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm9, %ymm0
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm8, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512F-NEXT:    vpxorq %zmm7, %zmm8, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm8, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm9
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm9, %ymm9
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 ^ zmm7 ^ zmm8
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm8 ^ zmm9 ^ zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm9
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm9, %ymm9
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 ^ zmm8 ^ zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm8 ^ zmm9 ^ zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm9
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm9, %ymm9
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 ^ zmm8 ^ zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512F-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm8 ^ zmm9 ^ zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512F-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm5
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm5, %ymm9
+; AVX512F-NEXT:    vpmullw %ymm6, %ymm9, %ymm6
+; AVX512F-NEXT:    vpmullw %ymm5, %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512F-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm0 ^ zmm8 ^ zmm7
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm0, %ymm5
+; AVX512F-NEXT:    vpand %ymm2, %ymm5, %ymm6
+; AVX512F-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
+; AVX512F-NEXT:    vpsrlw $4, %ymm5, %ymm5
+; AVX512F-NEXT:    vpand %ymm2, %ymm5, %ymm5
+; AVX512F-NEXT:    vpshufb %ymm5, %ymm1, %ymm5
+; AVX512F-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512F-NEXT:    vpsrlw $1, %ymm5, %ymm5
+; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm3
+; AVX512F-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
+; AVX512F-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512F-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512F-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX512F-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512F-NEXT:    vinserti64x4 $1, %ymm0, %zmm5, %zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: clmulh_v32i16:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm1, %ymm2
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm3 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
+; AVX512VL-NEXT:    vpshufb %ymm3, %ymm2, %ymm5
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX512VL-NEXT:    vpand %ymm2, %ymm5, %ymm6
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240,0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
+; AVX512VL-NEXT:    # ymm4 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
+; AVX512VL-NEXT:    vpshufb %ymm3, %ymm1, %ymm7
+; AVX512VL-NEXT:    vpand %ymm2, %ymm7, %ymm1
+; AVX512VL-NEXT:    vpshufb %ymm1, %ymm4, %ymm1
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm1, %zmm6
+; AVX512VL-NEXT:    vpsrlw $4, %ymm5, %ymm1
+; AVX512VL-NEXT:    vpand %ymm2, %ymm1, %ymm5
+; AVX512VL-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15,0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
+; AVX512VL-NEXT:    # ymm1 = mem[0,1,0,1]
+; AVX512VL-NEXT:    vpshufb %ymm5, %ymm1, %ymm5
+; AVX512VL-NEXT:    vpsrlw $4, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpand %ymm2, %ymm7, %ymm7
+; AVX512VL-NEXT:    vpshufb %ymm7, %ymm1, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm5, %zmm7, %zmm5
+; AVX512VL-NEXT:    vporq %zmm5, %zmm6, %zmm5
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm6
+; AVX512VL-NEXT:    vpshufb %ymm3, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpand %ymm2, %ymm6, %ymm9
+; AVX512VL-NEXT:    vpshufb %ymm9, %ymm4, %ymm9
+; AVX512VL-NEXT:    vpsrlw $4, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpand %ymm2, %ymm6, %ymm6
+; AVX512VL-NEXT:    vpshufb %ymm6, %ymm1, %ymm6
+; AVX512VL-NEXT:    vpor %ymm6, %ymm9, %ymm6
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm9
+; AVX512VL-NEXT:    vpshufb %ymm9, %ymm4, %ymm9
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm9, %ymm0
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm8, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpxorq %zmm7, %zmm8, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm8, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm9
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm9, %ymm9
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 ^ zmm7 ^ zmm8
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm8 ^ zmm9 ^ zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm9
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm9, %ymm9
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 ^ zmm8 ^ zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm8 ^ zmm9 ^ zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm9
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm9, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm9, %ymm9
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm9, %zmm9
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm9 = zmm9 ^ zmm8 ^ zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm8
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm8, %ymm8
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm8, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm8
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm8, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm10, %ymm10
+; AVX512VL-NEXT:    vpmullw %ymm0, %ymm8, %ymm8
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm10, %zmm8, %zmm8
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm8 = zmm8 ^ zmm9 ^ zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm7
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm7, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm9, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm7, %ymm0, %ymm7
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm9, %zmm7, %zmm7
+; AVX512VL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm5, %zmm5
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm5, %ymm9
+; AVX512VL-NEXT:    vpmullw %ymm6, %ymm9, %ymm6
+; AVX512VL-NEXT:    vpmullw %ymm5, %ymm0, %ymm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm6, %zmm0, %zmm0
+; AVX512VL-NEXT:    vpternlogq {{.*#+}} zmm0 = zmm0 ^ zmm8 ^ zmm7
+; AVX512VL-NEXT:    vpshufb %ymm3, %ymm0, %ymm5
+; AVX512VL-NEXT:    vpand %ymm2, %ymm5, %ymm6
+; AVX512VL-NEXT:    vpshufb %ymm6, %ymm4, %ymm6
+; AVX512VL-NEXT:    vpsrlw $4, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpand %ymm2, %ymm5, %ymm5
+; AVX512VL-NEXT:    vpshufb %ymm5, %ymm1, %ymm5
+; AVX512VL-NEXT:    vpor %ymm5, %ymm6, %ymm5
+; AVX512VL-NEXT:    vpsrlw $1, %ymm5, %ymm5
+; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; AVX512VL-NEXT:    vpshufb %ymm3, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm3
+; AVX512VL-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
+; AVX512VL-NEXT:    vpsrlw $4, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT:    vpshufb %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT:    vpor %ymm0, %ymm3, %ymm0
+; AVX512VL-NEXT:    vpsrlw $1, %ymm0, %ymm0
+; AVX512VL-NEXT:    vinserti64x4 $1, %ymm0, %zmm5, %zmm0
+; AVX512VL-NEXT:    retq
+  %a.ext = zext <32 x i16> %a to <32 x i32>
+  %b.ext = zext <32 x i16> %b to <32 x i32>
+  %clmul = call <32 x i32> @llvm.clmul.v32i32(<32 x i32> %a.ext, <32 x i32> %b.ext)
+  %res.ext = lshr <32 x i32> %clmul, splat (i32 16)
+  %res = trunc <32 x i32> %res.ext to <32 x i16>
+  ret <32 x i16> %res
+}
+
+define <16 x i32> @clmulh_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
+; AVX512-LABEL: clmulh_v16i32:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vextracti32x4 $3, %zmm1, %xmm4
+; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm3 = xmm4[0],xmm2[1,2,3]
+; AVX512-NEXT:    vextracti32x4 $3, %zmm0, %xmm5
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm6 = xmm5[0],xmm2[1,2,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm3, %xmm6, %xmm3
+; AVX512-NEXT:    vmovq %xmm3, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vmovd %eax, %xmm3
+; AVX512-NEXT:    vpsrlq $32, %xmm4, %xmm6
+; AVX512-NEXT:    vpsrlq $32, %xmm5, %xmm7
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX512-NEXT:    vmovq %xmm6, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm3, %xmm6
+; AVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm7 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm8 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm7, %xmm8, %xmm7
+; AVX512-NEXT:    vmovq %xmm7, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm6, %xmm6
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm4[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpclmulqdq $0, %xmm4, %xmm5, %xmm4
+; AVX512-NEXT:    vmovq %xmm4, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm6, %xmm4
+; AVX512-NEXT:    vextracti32x4 $2, %zmm1, %xmm5
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm6 = xmm5[0],xmm2[1,2,3]
+; AVX512-NEXT:    vextracti32x4 $2, %zmm0, %xmm7
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm8 = xmm7[0],xmm2[1,2,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm8, %xmm6
+; AVX512-NEXT:    vmovq %xmm6, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vmovd %eax, %xmm6
+; AVX512-NEXT:    vpsrlq $32, %xmm5, %xmm8
+; AVX512-NEXT:    vpsrlq $32, %xmm7, %xmm9
+; AVX512-NEXT:    vpclmulqdq $0, %xmm8, %xmm9, %xmm8
+; AVX512-NEXT:    vmovq %xmm8, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm6, %xmm6
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm8 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm7[2],xmm3[2],xmm7[3],xmm3[3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm8, %xmm9, %xmm8
+; AVX512-NEXT:    vmovq %xmm8, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm6, %xmm6
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm7[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpclmulqdq $0, %xmm5, %xmm7, %xmm5
+; AVX512-NEXT:    vmovq %xmm5, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm6, %xmm5
+; AVX512-NEXT:    vinserti128 $1, %xmm4, %ymm5, %ymm4
+; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm5
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm6 = xmm5[0],xmm2[1,2,3]
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm7
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm8 = xmm7[0],xmm2[1,2,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm8, %xmm6
+; AVX512-NEXT:    vmovq %xmm6, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vmovd %eax, %xmm6
+; AVX512-NEXT:    vpsrlq $32, %xmm5, %xmm8
+; AVX512-NEXT:    vpsrlq $32, %xmm7, %xmm9
+; AVX512-NEXT:    vpclmulqdq $0, %xmm8, %xmm9, %xmm8
+; AVX512-NEXT:    vmovq %xmm8, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm6, %xmm6
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm8 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm9 = xmm7[2],xmm3[2],xmm7[3],xmm3[3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm8, %xmm9, %xmm8
+; AVX512-NEXT:    vmovq %xmm8, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm6, %xmm6
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm5[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm7 = xmm7[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpclmulqdq $0, %xmm5, %xmm7, %xmm5
+; AVX512-NEXT:    vmovq %xmm5, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm6, %xmm5
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm6 = xmm1[0],xmm2[1,2,3]
+; AVX512-NEXT:    vpblendd {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm2, %xmm2
+; AVX512-NEXT:    vmovq %xmm2, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vmovd %eax, %xmm2
+; AVX512-NEXT:    vpsrlq $32, %xmm1, %xmm6
+; AVX512-NEXT:    vpsrlq $32, %xmm0, %xmm7
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm7, %xmm6
+; AVX512-NEXT:    vmovq %xmm6, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $1, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm6 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; AVX512-NEXT:    vpunpckhdq {{.*#+}} xmm3 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; AVX512-NEXT:    vpclmulqdq $0, %xmm6, %xmm3, %xmm3
+; AVX512-NEXT:    vmovq %xmm3, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $2, %eax, %xmm2, %xmm2
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vpclmulqdq $0, %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vmovq %xmm0, %rax
+; AVX512-NEXT:    shrq $32, %rax
+; AVX512-NEXT:    vpinsrd $3, %eax, %xmm2, %xmm0
+; AVX512-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm0
+; AVX512-NEXT:    vinserti64x4 $1, %ymm4, %zmm0, %zmm0
+; AVX512-NEXT:    retq
+  %a.ext = zext <16 x i32> %a to <16 x i64>
+  %b.ext = zext <16 x i32> %b to <16 x i64>
+  %clmul = call <16 x i64> @llvm.clmul.v16i64(<16 x i64> %a.ext, <16 x i64> %b.ext)
+  %res.ext = lshr <16 x i64> %clmul, splat (i64 32)
+  %res = trunc <16 x i64> %res.ext to <16 x i32>
+  ret <16 x i32> %res
+}
+
+define <8 x i64> @clmulh_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
+; AVX512-LABEL: clmulh_v8i64:
+; AVX512:       # %bb.0:
+; AVX512-NEXT:    vpclmulqdq $17, %zmm1, %zmm0, %zmm2
+; AVX512-NEXT:    vpclmulqdq $0, %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vpunpckhqdq {{.*#+}} zmm0 = zmm0[1],zmm2[1],zmm0[3],zmm2[3],zmm0[5],zmm2[5],zmm0[7],zmm2[7]
+; AVX512-NEXT:    retq
+  %a.ext = zext <8 x i64> %a to <8 x i128>
+  %b.ext = zext <8 x i64> %b to <8 x i128>
+  %clmul = call <8 x i128> @llvm.clmul.v8i128(<8 x i128> %a.ext, <8 x i128> %b.ext)
+  %res.ext = lshr <8 x i128> %clmul, splat (i128 64)
+  %res = trunc <8 x i128> %res.ext to <8 x i64>
+  ret <8 x i64> %res
+}
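
For readers skimming the generated checks above: every clmulh_* test follows the same zext -> llvm.clmul -> lshr -> trunc pattern, i.e. a per-lane carryless "multiply high". A minimal plain-C model of the 16-bit case, for reference only -- the helper name clmulh16_ref is purely illustrative and is not part of this patch or of any LLVM API:

  #include <stdint.h>

  /* Carryless multiply-high of one 16-bit lane: zero-extend, XOR-fold
     the shifted partial products (no carries), then keep the upper
     16 bits -- the same lshr-by-16 + trunc that the IR in
     clmulh_v32i16 performs per element. */
  static uint16_t clmulh16_ref(uint16_t a, uint16_t b) {
    uint32_t acc = 0;
    for (int i = 0; i < 16; ++i)
      if ((b >> i) & 1)
        acc ^= (uint32_t)a << i;      /* XOR instead of add */
    return (uint16_t)(acc >> 16);
  }

On targets with VPCLMULQDQ the 64-bit case lowers compactly, as the clmulh_v8i64 checks show: one vpclmulqdq with immediate $0 (low qword of each 128-bit lane) and one with $17 (high qword of each lane) produce the full 128-bit products, and the vpunpckhqdq gathers the upper 64 bits of each product into the result.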


