[llvm] be2be04 - [X86] Add common check prefixes to vector-bitreverse.ll

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Wed Mar 9 03:40:54 PST 2022


Author: Simon Pilgrim
Date: 2022-03-09T11:40:37Z
New Revision: be2be04f9f983966be828885437a25aa1ac6f6d3

URL: https://github.com/llvm/llvm-project/commit/be2be04f9f983966be828885437a25aa1ac6f6d3
DIFF: https://github.com/llvm/llvm-project/commit/be2be04f9f983966be828885437a25aa1ac6f6d3.diff

LOG: [X86] Add common check prefixes to vector-bitreverse.ll
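
The GFNI+AVX run lines previously each used a distinct prefix
(GFNIAVX, GFNIAVX2, GFNIAVX512F, GFNIAVX512BW), so
update_llc_test_checks.py had to emit a separate, often identical,
check block for every configuration. Grouping them under shared
prefixes (GFNIAVX for all AVX+GFNI configs, GFNIAVX512 for the two
AVX512 variants) lets the script fold identical codegen into a single
block, which is what removes the duplicated checks below.

As a minimal sketch of the mechanism (a hypothetical standalone test,
not part of this commit): when two RUN lines share a prefix and
produce the same assembly, one check block covers both:

  ; (hypothetical example, not from vector-bitreverse.ll)
  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+gfni | FileCheck %s --check-prefixes=ALL,GFNIAVX512
  ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+gfni | FileCheck %s --check-prefixes=ALL,GFNIAVX512

  define <16 x i8> @rev16i8(<16 x i8> %a) nounwind {
  ; GFNIAVX512-LABEL: rev16i8:
  ; GFNIAVX512:       # %bb.0:
  ; GFNIAVX512-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
  ; GFNIAVX512-NEXT:    retq
    %b = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %a)
    ret <16 x i8> %b
  }
  declare <16 x i8> @llvm.bitreverse.v16i8(<16 x i8>)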

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/vector-bitreverse.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/vector-bitreverse.ll b/llvm/test/CodeGen/X86/vector-bitreverse.ll
index dbf5db2aef685..6f6693ee6dbea 100644
--- a/llvm/test/CodeGen/X86/vector-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/vector-bitreverse.ll
@@ -1,17 +1,17 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=AVX  --check-prefix=AVX512  --check-prefix=AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX  --check-prefix=AVX512  --check-prefix=AVX512BW
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3,+gfni | FileCheck %s --check-prefix=ALL --check-prefix=GFNISSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+gfni | FileCheck %s --check-prefix=ALL --check-prefix=GFNIAVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+gfni | FileCheck %s --check-prefix=ALL --check-prefix=GFNIAVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+gfni | FileCheck %s --check-prefix=ALL --check-prefix=GFNIAVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+gfni | FileCheck %s --check-prefix=ALL --check-prefix=GFNIAVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=ALL,SSE,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=ALL,AVX,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX,AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=ALL,XOP,XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=ALL,XOP,XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3,+gfni | FileCheck %s --check-prefixes=ALL,GFNISSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+gfni | FileCheck %s --check-prefixes=ALL,GFNIAVX,GFNIAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+gfni | FileCheck %s --check-prefixes=ALL,GFNIAVX,GFNIAVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+gfni | FileCheck %s --check-prefixes=ALL,GFNIAVX,GFNIAVX512,GFNIAVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+gfni | FileCheck %s --check-prefixes=ALL,GFNIAVX,GFNIAVX512,GFNIAVX512BW
 
 ; Make sure we don't crash with avx512bw and xop
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx512bw
@@ -92,57 +92,6 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind {
 ; GFNIAVX-NEXT:    andb $85, %al
 ; GFNIAVX-NEXT:    orb %cl, %al
 ; GFNIAVX-NEXT:    retq
-;
-; GFNIAVX2-LABEL: test_bitreverse_i8:
-; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    rolb $4, %dil
-; GFNIAVX2-NEXT:    movl %edi, %eax
-; GFNIAVX2-NEXT:    andb $51, %al
-; GFNIAVX2-NEXT:    shlb $2, %al
-; GFNIAVX2-NEXT:    shrb $2, %dil
-; GFNIAVX2-NEXT:    andb $51, %dil
-; GFNIAVX2-NEXT:    orb %dil, %al
-; GFNIAVX2-NEXT:    movl %eax, %ecx
-; GFNIAVX2-NEXT:    andb $85, %cl
-; GFNIAVX2-NEXT:    addb %cl, %cl
-; GFNIAVX2-NEXT:    shrb %al
-; GFNIAVX2-NEXT:    andb $85, %al
-; GFNIAVX2-NEXT:    orb %cl, %al
-; GFNIAVX2-NEXT:    retq
-;
-; GFNIAVX512F-LABEL: test_bitreverse_i8:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    rolb $4, %dil
-; GFNIAVX512F-NEXT:    movl %edi, %eax
-; GFNIAVX512F-NEXT:    andb $51, %al
-; GFNIAVX512F-NEXT:    shlb $2, %al
-; GFNIAVX512F-NEXT:    shrb $2, %dil
-; GFNIAVX512F-NEXT:    andb $51, %dil
-; GFNIAVX512F-NEXT:    orb %dil, %al
-; GFNIAVX512F-NEXT:    movl %eax, %ecx
-; GFNIAVX512F-NEXT:    andb $85, %cl
-; GFNIAVX512F-NEXT:    addb %cl, %cl
-; GFNIAVX512F-NEXT:    shrb %al
-; GFNIAVX512F-NEXT:    andb $85, %al
-; GFNIAVX512F-NEXT:    orb %cl, %al
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: test_bitreverse_i8:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    rolb $4, %dil
-; GFNIAVX512BW-NEXT:    movl %edi, %eax
-; GFNIAVX512BW-NEXT:    andb $51, %al
-; GFNIAVX512BW-NEXT:    shlb $2, %al
-; GFNIAVX512BW-NEXT:    shrb $2, %dil
-; GFNIAVX512BW-NEXT:    andb $51, %dil
-; GFNIAVX512BW-NEXT:    orb %dil, %al
-; GFNIAVX512BW-NEXT:    movl %eax, %ecx
-; GFNIAVX512BW-NEXT:    andb $85, %cl
-; GFNIAVX512BW-NEXT:    addb %cl, %cl
-; GFNIAVX512BW-NEXT:    shrb %al
-; GFNIAVX512BW-NEXT:    andb $85, %al
-; GFNIAVX512BW-NEXT:    orb %cl, %al
-; GFNIAVX512BW-NEXT:    retq
   %b = call i8 @llvm.bitreverse.i8(i8 %a)
   ret i8 %b
 }
@@ -247,75 +196,6 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
 ; GFNIAVX-NEXT:    leal (%rax,%rcx,2), %eax
 ; GFNIAVX-NEXT:    # kill: def $ax killed $ax killed $eax
 ; GFNIAVX-NEXT:    retq
-;
-; GFNIAVX2-LABEL: test_bitreverse_i16:
-; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    # kill: def $edi killed $edi def $rdi
-; GFNIAVX2-NEXT:    rolw $8, %di
-; GFNIAVX2-NEXT:    movl %edi, %eax
-; GFNIAVX2-NEXT:    andl $3855, %eax # imm = 0xF0F
-; GFNIAVX2-NEXT:    shll $4, %eax
-; GFNIAVX2-NEXT:    shrl $4, %edi
-; GFNIAVX2-NEXT:    andl $3855, %edi # imm = 0xF0F
-; GFNIAVX2-NEXT:    orl %eax, %edi
-; GFNIAVX2-NEXT:    movl %edi, %eax
-; GFNIAVX2-NEXT:    andl $13107, %eax # imm = 0x3333
-; GFNIAVX2-NEXT:    shrl $2, %edi
-; GFNIAVX2-NEXT:    andl $13107, %edi # imm = 0x3333
-; GFNIAVX2-NEXT:    leal (%rdi,%rax,4), %eax
-; GFNIAVX2-NEXT:    movl %eax, %ecx
-; GFNIAVX2-NEXT:    andl $21845, %ecx # imm = 0x5555
-; GFNIAVX2-NEXT:    shrl %eax
-; GFNIAVX2-NEXT:    andl $21845, %eax # imm = 0x5555
-; GFNIAVX2-NEXT:    leal (%rax,%rcx,2), %eax
-; GFNIAVX2-NEXT:    # kill: def $ax killed $ax killed $eax
-; GFNIAVX2-NEXT:    retq
-;
-; GFNIAVX512F-LABEL: test_bitreverse_i16:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    # kill: def $edi killed $edi def $rdi
-; GFNIAVX512F-NEXT:    rolw $8, %di
-; GFNIAVX512F-NEXT:    movl %edi, %eax
-; GFNIAVX512F-NEXT:    andl $3855, %eax # imm = 0xF0F
-; GFNIAVX512F-NEXT:    shll $4, %eax
-; GFNIAVX512F-NEXT:    shrl $4, %edi
-; GFNIAVX512F-NEXT:    andl $3855, %edi # imm = 0xF0F
-; GFNIAVX512F-NEXT:    orl %eax, %edi
-; GFNIAVX512F-NEXT:    movl %edi, %eax
-; GFNIAVX512F-NEXT:    andl $13107, %eax # imm = 0x3333
-; GFNIAVX512F-NEXT:    shrl $2, %edi
-; GFNIAVX512F-NEXT:    andl $13107, %edi # imm = 0x3333
-; GFNIAVX512F-NEXT:    leal (%rdi,%rax,4), %eax
-; GFNIAVX512F-NEXT:    movl %eax, %ecx
-; GFNIAVX512F-NEXT:    andl $21845, %ecx # imm = 0x5555
-; GFNIAVX512F-NEXT:    shrl %eax
-; GFNIAVX512F-NEXT:    andl $21845, %eax # imm = 0x5555
-; GFNIAVX512F-NEXT:    leal (%rax,%rcx,2), %eax
-; GFNIAVX512F-NEXT:    # kill: def $ax killed $ax killed $eax
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: test_bitreverse_i16:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    # kill: def $edi killed $edi def $rdi
-; GFNIAVX512BW-NEXT:    rolw $8, %di
-; GFNIAVX512BW-NEXT:    movl %edi, %eax
-; GFNIAVX512BW-NEXT:    andl $3855, %eax # imm = 0xF0F
-; GFNIAVX512BW-NEXT:    shll $4, %eax
-; GFNIAVX512BW-NEXT:    shrl $4, %edi
-; GFNIAVX512BW-NEXT:    andl $3855, %edi # imm = 0xF0F
-; GFNIAVX512BW-NEXT:    orl %eax, %edi
-; GFNIAVX512BW-NEXT:    movl %edi, %eax
-; GFNIAVX512BW-NEXT:    andl $13107, %eax # imm = 0x3333
-; GFNIAVX512BW-NEXT:    shrl $2, %edi
-; GFNIAVX512BW-NEXT:    andl $13107, %edi # imm = 0x3333
-; GFNIAVX512BW-NEXT:    leal (%rdi,%rax,4), %eax
-; GFNIAVX512BW-NEXT:    movl %eax, %ecx
-; GFNIAVX512BW-NEXT:    andl $21845, %ecx # imm = 0x5555
-; GFNIAVX512BW-NEXT:    shrl %eax
-; GFNIAVX512BW-NEXT:    andl $21845, %eax # imm = 0x5555
-; GFNIAVX512BW-NEXT:    leal (%rax,%rcx,2), %eax
-; GFNIAVX512BW-NEXT:    # kill: def $ax killed $ax killed $eax
-; GFNIAVX512BW-NEXT:    retq
   %b = call i16 @llvm.bitreverse.i16(i16 %a)
   ret i16 %b
 }
@@ -415,72 +295,6 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
 ; GFNIAVX-NEXT:    andl $1431655765, %eax # imm = 0x55555555
 ; GFNIAVX-NEXT:    leal (%rax,%rcx,2), %eax
 ; GFNIAVX-NEXT:    retq
-;
-; GFNIAVX2-LABEL: test_bitreverse_i32:
-; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    # kill: def $edi killed $edi def $rdi
-; GFNIAVX2-NEXT:    bswapl %edi
-; GFNIAVX2-NEXT:    movl %edi, %eax
-; GFNIAVX2-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; GFNIAVX2-NEXT:    shll $4, %eax
-; GFNIAVX2-NEXT:    shrl $4, %edi
-; GFNIAVX2-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; GFNIAVX2-NEXT:    orl %eax, %edi
-; GFNIAVX2-NEXT:    movl %edi, %eax
-; GFNIAVX2-NEXT:    andl $858993459, %eax # imm = 0x33333333
-; GFNIAVX2-NEXT:    shrl $2, %edi
-; GFNIAVX2-NEXT:    andl $858993459, %edi # imm = 0x33333333
-; GFNIAVX2-NEXT:    leal (%rdi,%rax,4), %eax
-; GFNIAVX2-NEXT:    movl %eax, %ecx
-; GFNIAVX2-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
-; GFNIAVX2-NEXT:    shrl %eax
-; GFNIAVX2-NEXT:    andl $1431655765, %eax # imm = 0x55555555
-; GFNIAVX2-NEXT:    leal (%rax,%rcx,2), %eax
-; GFNIAVX2-NEXT:    retq
-;
-; GFNIAVX512F-LABEL: test_bitreverse_i32:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    # kill: def $edi killed $edi def $rdi
-; GFNIAVX512F-NEXT:    bswapl %edi
-; GFNIAVX512F-NEXT:    movl %edi, %eax
-; GFNIAVX512F-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; GFNIAVX512F-NEXT:    shll $4, %eax
-; GFNIAVX512F-NEXT:    shrl $4, %edi
-; GFNIAVX512F-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; GFNIAVX512F-NEXT:    orl %eax, %edi
-; GFNIAVX512F-NEXT:    movl %edi, %eax
-; GFNIAVX512F-NEXT:    andl $858993459, %eax # imm = 0x33333333
-; GFNIAVX512F-NEXT:    shrl $2, %edi
-; GFNIAVX512F-NEXT:    andl $858993459, %edi # imm = 0x33333333
-; GFNIAVX512F-NEXT:    leal (%rdi,%rax,4), %eax
-; GFNIAVX512F-NEXT:    movl %eax, %ecx
-; GFNIAVX512F-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
-; GFNIAVX512F-NEXT:    shrl %eax
-; GFNIAVX512F-NEXT:    andl $1431655765, %eax # imm = 0x55555555
-; GFNIAVX512F-NEXT:    leal (%rax,%rcx,2), %eax
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: test_bitreverse_i32:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    # kill: def $edi killed $edi def $rdi
-; GFNIAVX512BW-NEXT:    bswapl %edi
-; GFNIAVX512BW-NEXT:    movl %edi, %eax
-; GFNIAVX512BW-NEXT:    andl $252645135, %eax # imm = 0xF0F0F0F
-; GFNIAVX512BW-NEXT:    shll $4, %eax
-; GFNIAVX512BW-NEXT:    shrl $4, %edi
-; GFNIAVX512BW-NEXT:    andl $252645135, %edi # imm = 0xF0F0F0F
-; GFNIAVX512BW-NEXT:    orl %eax, %edi
-; GFNIAVX512BW-NEXT:    movl %edi, %eax
-; GFNIAVX512BW-NEXT:    andl $858993459, %eax # imm = 0x33333333
-; GFNIAVX512BW-NEXT:    shrl $2, %edi
-; GFNIAVX512BW-NEXT:    andl $858993459, %edi # imm = 0x33333333
-; GFNIAVX512BW-NEXT:    leal (%rdi,%rax,4), %eax
-; GFNIAVX512BW-NEXT:    movl %eax, %ecx
-; GFNIAVX512BW-NEXT:    andl $1431655765, %ecx # imm = 0x55555555
-; GFNIAVX512BW-NEXT:    shrl %eax
-; GFNIAVX512BW-NEXT:    andl $1431655765, %eax # imm = 0x55555555
-; GFNIAVX512BW-NEXT:    leal (%rax,%rcx,2), %eax
-; GFNIAVX512BW-NEXT:    retq
   %b = call i32 @llvm.bitreverse.i32(i32 %a)
   ret i32 %b
 }
@@ -588,78 +402,6 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
 ; GFNIAVX-NEXT:    andq %rcx, %rax
 ; GFNIAVX-NEXT:    leaq (%rax,%rdx,2), %rax
 ; GFNIAVX-NEXT:    retq
-;
-; GFNIAVX2-LABEL: test_bitreverse_i64:
-; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    bswapq %rdi
-; GFNIAVX2-NEXT:    movq %rdi, %rax
-; GFNIAVX2-NEXT:    shrq $4, %rax
-; GFNIAVX2-NEXT:    movabsq $1085102592571150095, %rcx # imm = 0xF0F0F0F0F0F0F0F
-; GFNIAVX2-NEXT:    andq %rcx, %rax
-; GFNIAVX2-NEXT:    andq %rcx, %rdi
-; GFNIAVX2-NEXT:    shlq $4, %rdi
-; GFNIAVX2-NEXT:    orq %rax, %rdi
-; GFNIAVX2-NEXT:    movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
-; GFNIAVX2-NEXT:    movq %rdi, %rcx
-; GFNIAVX2-NEXT:    andq %rax, %rcx
-; GFNIAVX2-NEXT:    shrq $2, %rdi
-; GFNIAVX2-NEXT:    andq %rax, %rdi
-; GFNIAVX2-NEXT:    leaq (%rdi,%rcx,4), %rax
-; GFNIAVX2-NEXT:    movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
-; GFNIAVX2-NEXT:    movq %rax, %rdx
-; GFNIAVX2-NEXT:    andq %rcx, %rdx
-; GFNIAVX2-NEXT:    shrq %rax
-; GFNIAVX2-NEXT:    andq %rcx, %rax
-; GFNIAVX2-NEXT:    leaq (%rax,%rdx,2), %rax
-; GFNIAVX2-NEXT:    retq
-;
-; GFNIAVX512F-LABEL: test_bitreverse_i64:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    bswapq %rdi
-; GFNIAVX512F-NEXT:    movq %rdi, %rax
-; GFNIAVX512F-NEXT:    shrq $4, %rax
-; GFNIAVX512F-NEXT:    movabsq $1085102592571150095, %rcx # imm = 0xF0F0F0F0F0F0F0F
-; GFNIAVX512F-NEXT:    andq %rcx, %rax
-; GFNIAVX512F-NEXT:    andq %rcx, %rdi
-; GFNIAVX512F-NEXT:    shlq $4, %rdi
-; GFNIAVX512F-NEXT:    orq %rax, %rdi
-; GFNIAVX512F-NEXT:    movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
-; GFNIAVX512F-NEXT:    movq %rdi, %rcx
-; GFNIAVX512F-NEXT:    andq %rax, %rcx
-; GFNIAVX512F-NEXT:    shrq $2, %rdi
-; GFNIAVX512F-NEXT:    andq %rax, %rdi
-; GFNIAVX512F-NEXT:    leaq (%rdi,%rcx,4), %rax
-; GFNIAVX512F-NEXT:    movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
-; GFNIAVX512F-NEXT:    movq %rax, %rdx
-; GFNIAVX512F-NEXT:    andq %rcx, %rdx
-; GFNIAVX512F-NEXT:    shrq %rax
-; GFNIAVX512F-NEXT:    andq %rcx, %rax
-; GFNIAVX512F-NEXT:    leaq (%rax,%rdx,2), %rax
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: test_bitreverse_i64:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    bswapq %rdi
-; GFNIAVX512BW-NEXT:    movq %rdi, %rax
-; GFNIAVX512BW-NEXT:    shrq $4, %rax
-; GFNIAVX512BW-NEXT:    movabsq $1085102592571150095, %rcx # imm = 0xF0F0F0F0F0F0F0F
-; GFNIAVX512BW-NEXT:    andq %rcx, %rax
-; GFNIAVX512BW-NEXT:    andq %rcx, %rdi
-; GFNIAVX512BW-NEXT:    shlq $4, %rdi
-; GFNIAVX512BW-NEXT:    orq %rax, %rdi
-; GFNIAVX512BW-NEXT:    movabsq $3689348814741910323, %rax # imm = 0x3333333333333333
-; GFNIAVX512BW-NEXT:    movq %rdi, %rcx
-; GFNIAVX512BW-NEXT:    andq %rax, %rcx
-; GFNIAVX512BW-NEXT:    shrq $2, %rdi
-; GFNIAVX512BW-NEXT:    andq %rax, %rdi
-; GFNIAVX512BW-NEXT:    leaq (%rdi,%rcx,4), %rax
-; GFNIAVX512BW-NEXT:    movabsq $6148914691236517205, %rcx # imm = 0x5555555555555555
-; GFNIAVX512BW-NEXT:    movq %rax, %rdx
-; GFNIAVX512BW-NEXT:    andq %rcx, %rdx
-; GFNIAVX512BW-NEXT:    shrq %rax
-; GFNIAVX512BW-NEXT:    andq %rcx, %rax
-; GFNIAVX512BW-NEXT:    leaq (%rax,%rdx,2), %rax
-; GFNIAVX512BW-NEXT:    retq
   %b = call i64 @llvm.bitreverse.i64(i64 %a)
   ret i64 %b
 }
@@ -732,21 +474,6 @@ define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) nounwind {
 ; GFNIAVX:       # %bb.0:
 ; GFNIAVX-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; GFNIAVX-NEXT:    retq
-;
-; GFNIAVX2-LABEL: test_bitreverse_v16i8:
-; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX2-NEXT:    retq
-;
-; GFNIAVX512F-LABEL: test_bitreverse_v16i8:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: test_bitreverse_v16i8:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX512BW-NEXT:    retq
   %b = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %a)
   ret <16 x i8> %b
 }
@@ -827,24 +554,6 @@ define <8 x i16> @test_bitreverse_v8i16(<8 x i16> %a) nounwind {
 ; GFNIAVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
 ; GFNIAVX-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; GFNIAVX-NEXT:    retq
-;
-; GFNIAVX2-LABEL: test_bitreverse_v8i16:
-; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX2-NEXT:    retq
-;
-; GFNIAVX512F-LABEL: test_bitreverse_v8i16:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
-; GFNIAVX512F-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: test_bitreverse_v8i16:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX512BW-NEXT:    retq
   %b = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %a)
   ret <8 x i16> %b
 }
@@ -930,24 +639,6 @@ define <4 x i32> @test_bitreverse_v4i32(<4 x i32> %a) nounwind {
 ; GFNIAVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
 ; GFNIAVX-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; GFNIAVX-NEXT:    retq
-;
-; GFNIAVX2-LABEL: test_bitreverse_v4i32:
-; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX2-NEXT:    retq
-;
-; GFNIAVX512F-LABEL: test_bitreverse_v4i32:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; GFNIAVX512F-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: test_bitreverse_v4i32:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX512BW-NEXT:    retq
   %b = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %a)
   ret <4 x i32> %b
 }
@@ -1035,24 +726,6 @@ define <2 x i64> @test_bitreverse_v2i64(<2 x i64> %a) nounwind {
 ; GFNIAVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
 ; GFNIAVX-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; GFNIAVX-NEXT:    retq
-;
-; GFNIAVX2-LABEL: test_bitreverse_v2i64:
-; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
-; GFNIAVX2-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX2-NEXT:    retq
-;
-; GFNIAVX512F-LABEL: test_bitreverse_v2i64:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
-; GFNIAVX512F-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: test_bitreverse_v2i64:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; GFNIAVX512BW-NEXT:    retq
   %b = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %a)
   ret <2 x i64> %b
 }
@@ -1198,14 +871,14 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm2, %xmm1
 ; GFNISSE-NEXT:    retq
 ;
-; GFNIAVX-LABEL: test_bitreverse_v32i8:
-; GFNIAVX:       # %bb.0:
-; GFNIAVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; GFNIAVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [9241421688590303745,9241421688590303745]
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm1, %xmm1
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm0, %xmm0
-; GFNIAVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; GFNIAVX-NEXT:    retq
+; GFNIAVX1-LABEL: test_bitreverse_v32i8:
+; GFNIAVX1:       # %bb.0:
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [9241421688590303745,9241421688590303745]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm2, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: test_bitreverse_v32i8:
 ; GFNIAVX2:       # %bb.0:
@@ -1213,17 +886,11 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    retq
 ;
-; GFNIAVX512F-LABEL: test_bitreverse_v32i8:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
-; GFNIAVX512F-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: test_bitreverse_v32i8:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
-; GFNIAVX512BW-NEXT:    retq
+; GFNIAVX512-LABEL: test_bitreverse_v32i8:
+; GFNIAVX512:       # %bb.0:
+; GFNIAVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
+; GFNIAVX512-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    retq
   %b = call <32 x i8> @llvm.bitreverse.v32i8(<32 x i8> %a)
   ret <32 x i8> %b
 }
@@ -1388,17 +1055,17 @@ define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm3, %xmm1
 ; GFNISSE-NEXT:    retq
 ;
-; GFNIAVX-LABEL: test_bitreverse_v16i16:
-; GFNIAVX:       # %bb.0:
-; GFNIAVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; GFNIAVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
-; GFNIAVX-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; GFNIAVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [9241421688590303745,9241421688590303745]
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
-; GFNIAVX-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
-; GFNIAVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; GFNIAVX-NEXT:    retq
+; GFNIAVX1-LABEL: test_bitreverse_v16i16:
+; GFNIAVX1:       # %bb.0:
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9241421688590303745,9241421688590303745]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: test_bitreverse_v16i16:
 ; GFNIAVX2:       # %bb.0:
@@ -1407,19 +1074,12 @@ define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind {
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    retq
 ;
-; GFNIAVX512F-LABEL: test_bitreverse_v16i16:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
-; GFNIAVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
-; GFNIAVX512F-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: test_bitreverse_v16i16:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
-; GFNIAVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
-; GFNIAVX512BW-NEXT:    retq
+; GFNIAVX512-LABEL: test_bitreverse_v16i16:
+; GFNIAVX512:       # %bb.0:
+; GFNIAVX512-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14,17,16,19,18,21,20,23,22,25,24,27,26,29,28,31,30]
+; GFNIAVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
+; GFNIAVX512-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    retq
   %b = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %a)
   ret <16 x i16> %b
 }
@@ -1593,17 +1253,17 @@ define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm3, %xmm1
 ; GFNISSE-NEXT:    retq
 ;
-; GFNIAVX-LABEL: test_bitreverse_v8i32:
-; GFNIAVX:       # %bb.0:
-; GFNIAVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; GFNIAVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; GFNIAVX-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; GFNIAVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [9241421688590303745,9241421688590303745]
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
-; GFNIAVX-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
-; GFNIAVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; GFNIAVX-NEXT:    retq
+; GFNIAVX1-LABEL: test_bitreverse_v8i32:
+; GFNIAVX1:       # %bb.0:
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9241421688590303745,9241421688590303745]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: test_bitreverse_v8i32:
 ; GFNIAVX2:       # %bb.0:
@@ -1612,19 +1272,12 @@ define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind {
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    retq
 ;
-; GFNIAVX512F-LABEL: test_bitreverse_v8i32:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28]
-; GFNIAVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
-; GFNIAVX512F-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: test_bitreverse_v8i32:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28]
-; GFNIAVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
-; GFNIAVX512BW-NEXT:    retq
+; GFNIAVX512-LABEL: test_bitreverse_v8i32:
+; GFNIAVX512:       # %bb.0:
+; GFNIAVX512-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12,19,18,17,16,23,22,21,20,27,26,25,24,31,30,29,28]
+; GFNIAVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
+; GFNIAVX512-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    retq
   %b = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %a)
   ret <8 x i32> %b
 }
@@ -1802,17 +1455,17 @@ define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm3, %xmm1
 ; GFNISSE-NEXT:    retq
 ;
-; GFNIAVX-LABEL: test_bitreverse_v4i64:
-; GFNIAVX:       # %bb.0:
-; GFNIAVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; GFNIAVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
-; GFNIAVX-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; GFNIAVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [9241421688590303745,9241421688590303745]
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
-; GFNIAVX-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
-; GFNIAVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; GFNIAVX-NEXT:    retq
+; GFNIAVX1-LABEL: test_bitreverse_v4i64:
+; GFNIAVX1:       # %bb.0:
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
+; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9241421688590303745,9241421688590303745]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: test_bitreverse_v4i64:
 ; GFNIAVX2:       # %bb.0:
@@ -1821,19 +1474,12 @@ define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind {
 ; GFNIAVX2-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
 ; GFNIAVX2-NEXT:    retq
 ;
-; GFNIAVX512F-LABEL: test_bitreverse_v4i64:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
-; GFNIAVX512F-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
-; GFNIAVX512F-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: test_bitreverse_v4i64:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
-; GFNIAVX512BW-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
-; GFNIAVX512BW-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
-; GFNIAVX512BW-NEXT:    retq
+; GFNIAVX512-LABEL: test_bitreverse_v4i64:
+; GFNIAVX512:       # %bb.0:
+; GFNIAVX512-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8,23,22,21,20,19,18,17,16,31,30,29,28,27,26,25,24]
+; GFNIAVX512-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
+; GFNIAVX512-NEXT:    vgf2p8affineqb $0, %ymm1, %ymm0, %ymm0
+; GFNIAVX512-NEXT:    retq
   %b = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %a)
   ret <4 x i64> %b
 }
@@ -2085,18 +1731,18 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm4, %xmm3
 ; GFNISSE-NEXT:    retq
 ;
-; GFNIAVX-LABEL: test_bitreverse_v64i8:
-; GFNIAVX:       # %bb.0:
-; GFNIAVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; GFNIAVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [9241421688590303745,9241421688590303745]
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm2, %xmm2
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
-; GFNIAVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; GFNIAVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm2, %xmm2
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
-; GFNIAVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; GFNIAVX-NEXT:    retq
+; GFNIAVX1-LABEL: test_bitreverse_v64i8:
+; GFNIAVX1:       # %bb.0:
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [9241421688590303745,9241421688590303745]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: test_bitreverse_v64i8:
 ; GFNIAVX2:       # %bb.0:
@@ -2408,23 +2054,23 @@ define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind {
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm5, %xmm3
 ; GFNISSE-NEXT:    retq
 ;
-; GFNIAVX-LABEL: test_bitreverse_v32i16:
-; GFNIAVX:       # %bb.0:
-; GFNIAVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; GFNIAVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
-; GFNIAVX-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; GFNIAVX-NEXT:    vmovdqa {{.*#+}} xmm4 = [9241421688590303745,9241421688590303745]
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm2, %xmm2
-; GFNIAVX-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm0, %xmm0
-; GFNIAVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; GFNIAVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; GFNIAVX-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm2, %xmm2
-; GFNIAVX-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm1, %xmm1
-; GFNIAVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; GFNIAVX-NEXT:    retq
+; GFNIAVX1-LABEL: test_bitreverse_v32i16:
+; GFNIAVX1:       # %bb.0:
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14]
+; GFNIAVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9241421688590303745,9241421688590303745]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; GFNIAVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: test_bitreverse_v32i16:
 ; GFNIAVX2:       # %bb.0:
@@ -2760,23 +2406,23 @@ define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind {
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm5, %xmm3
 ; GFNISSE-NEXT:    retq
 ;
-; GFNIAVX-LABEL: test_bitreverse_v16i32:
-; GFNIAVX:       # %bb.0:
-; GFNIAVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; GFNIAVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; GFNIAVX-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; GFNIAVX-NEXT:    vmovdqa {{.*#+}} xmm4 = [9241421688590303745,9241421688590303745]
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm2, %xmm2
-; GFNIAVX-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm0, %xmm0
-; GFNIAVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; GFNIAVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; GFNIAVX-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm2, %xmm2
-; GFNIAVX-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm1, %xmm1
-; GFNIAVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; GFNIAVX-NEXT:    retq
+; GFNIAVX1-LABEL: test_bitreverse_v16i32:
+; GFNIAVX1:       # %bb.0:
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
+; GFNIAVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9241421688590303745,9241421688590303745]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; GFNIAVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: test_bitreverse_v16i32:
 ; GFNIAVX2:       # %bb.0:
@@ -3120,23 +2766,23 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind {
 ; GFNISSE-NEXT:    gf2p8affineqb $0, %xmm5, %xmm3
 ; GFNISSE-NEXT:    retq
 ;
-; GFNIAVX-LABEL: test_bitreverse_v8i64:
-; GFNIAVX:       # %bb.0:
-; GFNIAVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; GFNIAVX-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
-; GFNIAVX-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; GFNIAVX-NEXT:    vmovdqa {{.*#+}} xmm4 = [9241421688590303745,9241421688590303745]
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm2, %xmm2
-; GFNIAVX-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm0, %xmm0
-; GFNIAVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; GFNIAVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; GFNIAVX-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm2, %xmm2
-; GFNIAVX-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
-; GFNIAVX-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm1, %xmm1
-; GFNIAVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; GFNIAVX-NEXT:    retq
+; GFNIAVX1-LABEL: test_bitreverse_v8i64:
+; GFNIAVX1:       # %bb.0:
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm3 = [7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8]
+; GFNIAVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [9241421688590303745,9241421688590303745]
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm0, %xmm0
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; GFNIAVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; GFNIAVX1-NEXT:    vpshufb %xmm3, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm2, %xmm2
+; GFNIAVX1-NEXT:    vpshufb %xmm3, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vgf2p8affineqb $0, %xmm4, %xmm1, %xmm1
+; GFNIAVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: test_bitreverse_v8i64:
 ; GFNIAVX2:       # %bb.0:
@@ -3207,21 +2853,6 @@ define <16 x i8> @fold_bitreverse_v16i8() nounwind {
 ; GFNIAVX:       # %bb.0:
 ; GFNIAVX-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
 ; GFNIAVX-NEXT:    retq
-;
-; GFNIAVX2-LABEL: fold_bitreverse_v16i8:
-; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
-; GFNIAVX2-NEXT:    retq
-;
-; GFNIAVX512F-LABEL: fold_bitreverse_v16i8:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: fold_bitreverse_v16i8:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vmovaps {{.*#+}} xmm0 = [0,255,64,191,32,223,96,159,16,239,80,175,48,207,112,143]
-; GFNIAVX512BW-NEXT:    retq
   %b = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> <i8 0, i8 -1, i8 2, i8 -3, i8 4, i8 -5, i8 6, i8 -7, i8 8, i8 -9, i8 10, i8 -11, i8 12, i8 -13, i8 14, i8 -15>)
   ret <16 x i8> %b
 }
@@ -3253,21 +2884,6 @@ define <16 x i16> @fold_bitreverse_v16i16() nounwind {
 ; GFNIAVX:       # %bb.0:
 ; GFNIAVX-NEXT:    vmovaps {{.*#+}} ymm0 = [0,65535,16384,49151,8192,57343,24576,40959,4096,61439,20480,45055,12288,53247,28672,36863]
 ; GFNIAVX-NEXT:    retq
-;
-; GFNIAVX2-LABEL: fold_bitreverse_v16i16:
-; GFNIAVX2:       # %bb.0:
-; GFNIAVX2-NEXT:    vmovaps {{.*#+}} ymm0 = [0,65535,16384,49151,8192,57343,24576,40959,4096,61439,20480,45055,12288,53247,28672,36863]
-; GFNIAVX2-NEXT:    retq
-;
-; GFNIAVX512F-LABEL: fold_bitreverse_v16i16:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    vmovaps {{.*#+}} ymm0 = [0,65535,16384,49151,8192,57343,24576,40959,4096,61439,20480,45055,12288,53247,28672,36863]
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: fold_bitreverse_v16i16:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vmovaps {{.*#+}} ymm0 = [0,65535,16384,49151,8192,57343,24576,40959,4096,61439,20480,45055,12288,53247,28672,36863]
-; GFNIAVX512BW-NEXT:    retq
   %b = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> <i16 0, i16 -1, i16 2, i16 -3, i16 4, i16 -5, i16 6, i16 -7, i16 8, i16 -9, i16 10, i16 -11, i16 12, i16 -13, i16 14, i16 -15>)
   ret <16 x i16> %b
 }
@@ -3312,11 +2928,11 @@ define <16 x i32> @fold_bitreverse_v16i32() nounwind {
 ; GFNISSE-NEXT:    movaps {{.*#+}} xmm3 = [805306368,3489660927,1879048192,2415919103]
 ; GFNISSE-NEXT:    retq
 ;
-; GFNIAVX-LABEL: fold_bitreverse_v16i32:
-; GFNIAVX:       # %bb.0:
-; GFNIAVX-NEXT:    vmovaps {{.*#+}} ymm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559]
-; GFNIAVX-NEXT:    vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
-; GFNIAVX-NEXT:    retq
+; GFNIAVX1-LABEL: fold_bitreverse_v16i32:
+; GFNIAVX1:       # %bb.0:
+; GFNIAVX1-NEXT:    vmovaps {{.*#+}} ymm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559]
+; GFNIAVX1-NEXT:    vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
+; GFNIAVX1-NEXT:    retq
 ;
 ; GFNIAVX2-LABEL: fold_bitreverse_v16i32:
 ; GFNIAVX2:       # %bb.0:
@@ -3324,15 +2940,10 @@ define <16 x i32> @fold_bitreverse_v16i32() nounwind {
 ; GFNIAVX2-NEXT:    vmovaps {{.*#+}} ymm1 = [268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
 ; GFNIAVX2-NEXT:    retq
 ;
-; GFNIAVX512F-LABEL: fold_bitreverse_v16i32:
-; GFNIAVX512F:       # %bb.0:
-; GFNIAVX512F-NEXT:    vmovaps {{.*#+}} zmm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559,268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
-; GFNIAVX512F-NEXT:    retq
-;
-; GFNIAVX512BW-LABEL: fold_bitreverse_v16i32:
-; GFNIAVX512BW:       # %bb.0:
-; GFNIAVX512BW-NEXT:    vmovaps {{.*#+}} zmm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559,268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
-; GFNIAVX512BW-NEXT:    retq
+; GFNIAVX512-LABEL: fold_bitreverse_v16i32:
+; GFNIAVX512:       # %bb.0:
+; GFNIAVX512-NEXT:    vmovaps {{.*#+}} zmm0 = [0,4294967295,1073741824,3221225471,536870912,3758096383,1610612736,2684354559,268435456,4026531839,1342177280,2952790015,805306368,3489660927,1879048192,2415919103]
+; GFNIAVX512-NEXT:    retq
   %b = call <16 x i32> @llvm.bitreverse.v16i32(<16 x i32> <i32 0, i32 -1, i32 2, i32 -3, i32 4, i32 -5, i32 6, i32 -7, i32 8, i32 -9, i32 10, i32 -11, i32 12, i32 -13, i32 14, i32 -15>)
   ret <16 x i32> %b
 }
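
The check lines themselves are autogenerated; after changing the RUN
prefixes, the blocks above can be regenerated with something like the
following (assuming llc from a build tree is on PATH):

  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/vector-bitreverse.ll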
