[llvm] 0810af7 - [NFC][X86][Codegen] Add tests for splat-storing the same scalar to memory

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Sat Jan 14 16:50:32 PST 2023


Author: Roman Lebedev
Date: 2023-01-15T03:50:13+03:00
New Revision: 0810af720829f62a003d13540a49a571a28c415b

URL: https://github.com/llvm/llvm-project/commit/0810af720829f62a003d13540a49a571a28c415b
DIFF: https://github.com/llvm/llvm-project/commit/0810af720829f62a003d13540a49a571a28c415b.diff

LOG: [NFC][X86][Codegen] Add tests for splat-storing the same scalar to memory

Added: 
    llvm/test/CodeGen/X86/elementwise-store-of-scalar-splat.ll

Modified: 
    

Removed: 
    

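"Splat-storing" here means writing the same scalar to every element slot of a vector-sized buffer with individual scalar stores; the tests below spell that pattern out element by element. For reference (an editorial aside, not part of this commit), the single-vector-store form that such a pattern is semantically equivalent to could be hand-written in IR like this for the i8 case (the function name is hypothetical):

    define void @vec512_i8_splat_store(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
      ; Load and invert the scalar, as the tests below do.
      %in.elt.not = load i8, ptr %in.elt.ptr, align 64
      %in.elt = xor i8 %in.elt.not, -1
      ; Broadcast the scalar into a <64 x i8> splat...
      %ins = insertelement <64 x i8> poison, i8 %in.elt, i64 0
      %splat = shufflevector <64 x i8> %ins, <64 x i8> poison, <64 x i32> zeroinitializer
      ; ...and store the whole 64-byte vector with one store.
      store <64 x i8> %splat, ptr %out.vec.ptr, align 64
      ret void
    }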

################################################################################
diff --git a/llvm/test/CodeGen/X86/elementwise-store-of-scalar-splat.ll b/llvm/test/CodeGen/X86/elementwise-store-of-scalar-splat.ll
new file mode 100644
index 000000000000..a1eac920d449
--- /dev/null
+++ b/llvm/test/CodeGen/X86/elementwise-store-of-scalar-splat.ll
@@ -0,0 +1,581 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,SSE,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse3 | FileCheck %s --check-prefixes=ALL,SSE,SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+ssse3 | FileCheck %s --check-prefixes=ALL,SSE,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse4.1 | FileCheck %s --check-prefixes=ALL,SSE,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse4.2 | FileCheck %s --check-prefixes=ALL,SSE,SSE42
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s --check-prefixes=ALL,AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=ALL,AVX512,AVX512BW
+
+define void @vec512_i8(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec512_i8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    movzbl (%rdi), %eax
+; ALL-NEXT:    notb %al
+; ALL-NEXT:    movb %al, (%rsi)
+; ALL-NEXT:    movb %al, 1(%rsi)
+; ALL-NEXT:    movb %al, 2(%rsi)
+; ALL-NEXT:    movb %al, 3(%rsi)
+; ALL-NEXT:    movb %al, 4(%rsi)
+; ALL-NEXT:    movb %al, 5(%rsi)
+; ALL-NEXT:    movb %al, 6(%rsi)
+; ALL-NEXT:    movb %al, 7(%rsi)
+; ALL-NEXT:    movb %al, 8(%rsi)
+; ALL-NEXT:    movb %al, 9(%rsi)
+; ALL-NEXT:    movb %al, 10(%rsi)
+; ALL-NEXT:    movb %al, 11(%rsi)
+; ALL-NEXT:    movb %al, 12(%rsi)
+; ALL-NEXT:    movb %al, 13(%rsi)
+; ALL-NEXT:    movb %al, 14(%rsi)
+; ALL-NEXT:    movb %al, 15(%rsi)
+; ALL-NEXT:    movb %al, 16(%rsi)
+; ALL-NEXT:    movb %al, 17(%rsi)
+; ALL-NEXT:    movb %al, 18(%rsi)
+; ALL-NEXT:    movb %al, 19(%rsi)
+; ALL-NEXT:    movb %al, 20(%rsi)
+; ALL-NEXT:    movb %al, 21(%rsi)
+; ALL-NEXT:    movb %al, 22(%rsi)
+; ALL-NEXT:    movb %al, 23(%rsi)
+; ALL-NEXT:    movb %al, 24(%rsi)
+; ALL-NEXT:    movb %al, 25(%rsi)
+; ALL-NEXT:    movb %al, 26(%rsi)
+; ALL-NEXT:    movb %al, 27(%rsi)
+; ALL-NEXT:    movb %al, 28(%rsi)
+; ALL-NEXT:    movb %al, 29(%rsi)
+; ALL-NEXT:    movb %al, 30(%rsi)
+; ALL-NEXT:    movb %al, 31(%rsi)
+; ALL-NEXT:    movb %al, 32(%rsi)
+; ALL-NEXT:    movb %al, 33(%rsi)
+; ALL-NEXT:    movb %al, 34(%rsi)
+; ALL-NEXT:    movb %al, 35(%rsi)
+; ALL-NEXT:    movb %al, 36(%rsi)
+; ALL-NEXT:    movb %al, 37(%rsi)
+; ALL-NEXT:    movb %al, 38(%rsi)
+; ALL-NEXT:    movb %al, 39(%rsi)
+; ALL-NEXT:    movb %al, 40(%rsi)
+; ALL-NEXT:    movb %al, 41(%rsi)
+; ALL-NEXT:    movb %al, 42(%rsi)
+; ALL-NEXT:    movb %al, 43(%rsi)
+; ALL-NEXT:    movb %al, 44(%rsi)
+; ALL-NEXT:    movb %al, 45(%rsi)
+; ALL-NEXT:    movb %al, 46(%rsi)
+; ALL-NEXT:    movb %al, 47(%rsi)
+; ALL-NEXT:    movb %al, 48(%rsi)
+; ALL-NEXT:    movb %al, 49(%rsi)
+; ALL-NEXT:    movb %al, 50(%rsi)
+; ALL-NEXT:    movb %al, 51(%rsi)
+; ALL-NEXT:    movb %al, 52(%rsi)
+; ALL-NEXT:    movb %al, 53(%rsi)
+; ALL-NEXT:    movb %al, 54(%rsi)
+; ALL-NEXT:    movb %al, 55(%rsi)
+; ALL-NEXT:    movb %al, 56(%rsi)
+; ALL-NEXT:    movb %al, 57(%rsi)
+; ALL-NEXT:    movb %al, 58(%rsi)
+; ALL-NEXT:    movb %al, 59(%rsi)
+; ALL-NEXT:    movb %al, 60(%rsi)
+; ALL-NEXT:    movb %al, 61(%rsi)
+; ALL-NEXT:    movb %al, 62(%rsi)
+; ALL-NEXT:    movb %al, 63(%rsi)
+; ALL-NEXT:    retq
+  %in.elt.not = load i8, ptr %in.elt.ptr, align 64
+  %in.elt = xor i8 %in.elt.not, -1
+  %out.elt0.ptr = getelementptr i8, ptr %out.vec.ptr, i64 0
+  store i8 %in.elt, ptr %out.elt0.ptr, align 64
+  %out.elt1.ptr = getelementptr i8, ptr %out.vec.ptr, i64 1
+  store i8 %in.elt, ptr %out.elt1.ptr, align 1
+  %out.elt2.ptr = getelementptr i8, ptr %out.vec.ptr, i64 2
+  store i8 %in.elt, ptr %out.elt2.ptr, align 2
+  %out.elt3.ptr = getelementptr i8, ptr %out.vec.ptr, i64 3
+  store i8 %in.elt, ptr %out.elt3.ptr, align 1
+  %out.elt4.ptr = getelementptr i8, ptr %out.vec.ptr, i64 4
+  store i8 %in.elt, ptr %out.elt4.ptr, align 4
+  %out.elt5.ptr = getelementptr i8, ptr %out.vec.ptr, i64 5
+  store i8 %in.elt, ptr %out.elt5.ptr, align 1
+  %out.elt6.ptr = getelementptr i8, ptr %out.vec.ptr, i64 6
+  store i8 %in.elt, ptr %out.elt6.ptr, align 2
+  %out.elt7.ptr = getelementptr i8, ptr %out.vec.ptr, i64 7
+  store i8 %in.elt, ptr %out.elt7.ptr, align 1
+  %out.elt8.ptr = getelementptr i8, ptr %out.vec.ptr, i64 8
+  store i8 %in.elt, ptr %out.elt8.ptr, align 8
+  %out.elt9.ptr = getelementptr i8, ptr %out.vec.ptr, i64 9
+  store i8 %in.elt, ptr %out.elt9.ptr, align 1
+  %out.elt10.ptr = getelementptr i8, ptr %out.vec.ptr, i64 10
+  store i8 %in.elt, ptr %out.elt10.ptr, align 2
+  %out.elt11.ptr = getelementptr i8, ptr %out.vec.ptr, i64 11
+  store i8 %in.elt, ptr %out.elt11.ptr, align 1
+  %out.elt12.ptr = getelementptr i8, ptr %out.vec.ptr, i64 12
+  store i8 %in.elt, ptr %out.elt12.ptr, align 4
+  %out.elt13.ptr = getelementptr i8, ptr %out.vec.ptr, i64 13
+  store i8 %in.elt, ptr %out.elt13.ptr, align 1
+  %out.elt14.ptr = getelementptr i8, ptr %out.vec.ptr, i64 14
+  store i8 %in.elt, ptr %out.elt14.ptr, align 2
+  %out.elt15.ptr = getelementptr i8, ptr %out.vec.ptr, i64 15
+  store i8 %in.elt, ptr %out.elt15.ptr, align 1
+  %out.elt16.ptr = getelementptr i8, ptr %out.vec.ptr, i64 16
+  store i8 %in.elt, ptr %out.elt16.ptr, align 16
+  %out.elt17.ptr = getelementptr i8, ptr %out.vec.ptr, i64 17
+  store i8 %in.elt, ptr %out.elt17.ptr, align 1
+  %out.elt18.ptr = getelementptr i8, ptr %out.vec.ptr, i64 18
+  store i8 %in.elt, ptr %out.elt18.ptr, align 2
+  %out.elt19.ptr = getelementptr i8, ptr %out.vec.ptr, i64 19
+  store i8 %in.elt, ptr %out.elt19.ptr, align 1
+  %out.elt20.ptr = getelementptr i8, ptr %out.vec.ptr, i64 20
+  store i8 %in.elt, ptr %out.elt20.ptr, align 4
+  %out.elt21.ptr = getelementptr i8, ptr %out.vec.ptr, i64 21
+  store i8 %in.elt, ptr %out.elt21.ptr, align 1
+  %out.elt22.ptr = getelementptr i8, ptr %out.vec.ptr, i64 22
+  store i8 %in.elt, ptr %out.elt22.ptr, align 2
+  %out.elt23.ptr = getelementptr i8, ptr %out.vec.ptr, i64 23
+  store i8 %in.elt, ptr %out.elt23.ptr, align 1
+  %out.elt24.ptr = getelementptr i8, ptr %out.vec.ptr, i64 24
+  store i8 %in.elt, ptr %out.elt24.ptr, align 8
+  %out.elt25.ptr = getelementptr i8, ptr %out.vec.ptr, i64 25
+  store i8 %in.elt, ptr %out.elt25.ptr, align 1
+  %out.elt26.ptr = getelementptr i8, ptr %out.vec.ptr, i64 26
+  store i8 %in.elt, ptr %out.elt26.ptr, align 2
+  %out.elt27.ptr = getelementptr i8, ptr %out.vec.ptr, i64 27
+  store i8 %in.elt, ptr %out.elt27.ptr, align 1
+  %out.elt28.ptr = getelementptr i8, ptr %out.vec.ptr, i64 28
+  store i8 %in.elt, ptr %out.elt28.ptr, align 4
+  %out.elt29.ptr = getelementptr i8, ptr %out.vec.ptr, i64 29
+  store i8 %in.elt, ptr %out.elt29.ptr, align 1
+  %out.elt30.ptr = getelementptr i8, ptr %out.vec.ptr, i64 30
+  store i8 %in.elt, ptr %out.elt30.ptr, align 2
+  %out.elt31.ptr = getelementptr i8, ptr %out.vec.ptr, i64 31
+  store i8 %in.elt, ptr %out.elt31.ptr, align 1
+  %out.elt32.ptr = getelementptr i8, ptr %out.vec.ptr, i64 32
+  store i8 %in.elt, ptr %out.elt32.ptr, align 32
+  %out.elt33.ptr = getelementptr i8, ptr %out.vec.ptr, i64 33
+  store i8 %in.elt, ptr %out.elt33.ptr, align 1
+  %out.elt34.ptr = getelementptr i8, ptr %out.vec.ptr, i64 34
+  store i8 %in.elt, ptr %out.elt34.ptr, align 2
+  %out.elt35.ptr = getelementptr i8, ptr %out.vec.ptr, i64 35
+  store i8 %in.elt, ptr %out.elt35.ptr, align 1
+  %out.elt36.ptr = getelementptr i8, ptr %out.vec.ptr, i64 36
+  store i8 %in.elt, ptr %out.elt36.ptr, align 4
+  %out.elt37.ptr = getelementptr i8, ptr %out.vec.ptr, i64 37
+  store i8 %in.elt, ptr %out.elt37.ptr, align 1
+  %out.elt38.ptr = getelementptr i8, ptr %out.vec.ptr, i64 38
+  store i8 %in.elt, ptr %out.elt38.ptr, align 2
+  %out.elt39.ptr = getelementptr i8, ptr %out.vec.ptr, i64 39
+  store i8 %in.elt, ptr %out.elt39.ptr, align 1
+  %out.elt40.ptr = getelementptr i8, ptr %out.vec.ptr, i64 40
+  store i8 %in.elt, ptr %out.elt40.ptr, align 8
+  %out.elt41.ptr = getelementptr i8, ptr %out.vec.ptr, i64 41
+  store i8 %in.elt, ptr %out.elt41.ptr, align 1
+  %out.elt42.ptr = getelementptr i8, ptr %out.vec.ptr, i64 42
+  store i8 %in.elt, ptr %out.elt42.ptr, align 2
+  %out.elt43.ptr = getelementptr i8, ptr %out.vec.ptr, i64 43
+  store i8 %in.elt, ptr %out.elt43.ptr, align 1
+  %out.elt44.ptr = getelementptr i8, ptr %out.vec.ptr, i64 44
+  store i8 %in.elt, ptr %out.elt44.ptr, align 4
+  %out.elt45.ptr = getelementptr i8, ptr %out.vec.ptr, i64 45
+  store i8 %in.elt, ptr %out.elt45.ptr, align 1
+  %out.elt46.ptr = getelementptr i8, ptr %out.vec.ptr, i64 46
+  store i8 %in.elt, ptr %out.elt46.ptr, align 2
+  %out.elt47.ptr = getelementptr i8, ptr %out.vec.ptr, i64 47
+  store i8 %in.elt, ptr %out.elt47.ptr, align 1
+  %out.elt48.ptr = getelementptr i8, ptr %out.vec.ptr, i64 48
+  store i8 %in.elt, ptr %out.elt48.ptr, align 16
+  %out.elt49.ptr = getelementptr i8, ptr %out.vec.ptr, i64 49
+  store i8 %in.elt, ptr %out.elt49.ptr, align 1
+  %out.elt50.ptr = getelementptr i8, ptr %out.vec.ptr, i64 50
+  store i8 %in.elt, ptr %out.elt50.ptr, align 2
+  %out.elt51.ptr = getelementptr i8, ptr %out.vec.ptr, i64 51
+  store i8 %in.elt, ptr %out.elt51.ptr, align 1
+  %out.elt52.ptr = getelementptr i8, ptr %out.vec.ptr, i64 52
+  store i8 %in.elt, ptr %out.elt52.ptr, align 4
+  %out.elt53.ptr = getelementptr i8, ptr %out.vec.ptr, i64 53
+  store i8 %in.elt, ptr %out.elt53.ptr, align 1
+  %out.elt54.ptr = getelementptr i8, ptr %out.vec.ptr, i64 54
+  store i8 %in.elt, ptr %out.elt54.ptr, align 2
+  %out.elt55.ptr = getelementptr i8, ptr %out.vec.ptr, i64 55
+  store i8 %in.elt, ptr %out.elt55.ptr, align 1
+  %out.elt56.ptr = getelementptr i8, ptr %out.vec.ptr, i64 56
+  store i8 %in.elt, ptr %out.elt56.ptr, align 8
+  %out.elt57.ptr = getelementptr i8, ptr %out.vec.ptr, i64 57
+  store i8 %in.elt, ptr %out.elt57.ptr, align 1
+  %out.elt58.ptr = getelementptr i8, ptr %out.vec.ptr, i64 58
+  store i8 %in.elt, ptr %out.elt58.ptr, align 2
+  %out.elt59.ptr = getelementptr i8, ptr %out.vec.ptr, i64 59
+  store i8 %in.elt, ptr %out.elt59.ptr, align 1
+  %out.elt60.ptr = getelementptr i8, ptr %out.vec.ptr, i64 60
+  store i8 %in.elt, ptr %out.elt60.ptr, align 4
+  %out.elt61.ptr = getelementptr i8, ptr %out.vec.ptr, i64 61
+  store i8 %in.elt, ptr %out.elt61.ptr, align 1
+  %out.elt62.ptr = getelementptr i8, ptr %out.vec.ptr, i64 62
+  store i8 %in.elt, ptr %out.elt62.ptr, align 2
+  %out.elt63.ptr = getelementptr i8, ptr %out.vec.ptr, i64 63
+  store i8 %in.elt, ptr %out.elt63.ptr, align 1
+  ret void
+}
+
+define void @vec512_i16(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec512_i16:
+; ALL:       # %bb.0:
+; ALL-NEXT:    movl (%rdi), %eax
+; ALL-NEXT:    notl %eax
+; ALL-NEXT:    movw %ax, (%rsi)
+; ALL-NEXT:    movw %ax, 2(%rsi)
+; ALL-NEXT:    movw %ax, 4(%rsi)
+; ALL-NEXT:    movw %ax, 6(%rsi)
+; ALL-NEXT:    movw %ax, 8(%rsi)
+; ALL-NEXT:    movw %ax, 10(%rsi)
+; ALL-NEXT:    movw %ax, 12(%rsi)
+; ALL-NEXT:    movw %ax, 14(%rsi)
+; ALL-NEXT:    movw %ax, 16(%rsi)
+; ALL-NEXT:    movw %ax, 18(%rsi)
+; ALL-NEXT:    movw %ax, 20(%rsi)
+; ALL-NEXT:    movw %ax, 22(%rsi)
+; ALL-NEXT:    movw %ax, 24(%rsi)
+; ALL-NEXT:    movw %ax, 26(%rsi)
+; ALL-NEXT:    movw %ax, 28(%rsi)
+; ALL-NEXT:    movw %ax, 30(%rsi)
+; ALL-NEXT:    movw %ax, 32(%rsi)
+; ALL-NEXT:    movw %ax, 34(%rsi)
+; ALL-NEXT:    movw %ax, 36(%rsi)
+; ALL-NEXT:    movw %ax, 38(%rsi)
+; ALL-NEXT:    movw %ax, 40(%rsi)
+; ALL-NEXT:    movw %ax, 42(%rsi)
+; ALL-NEXT:    movw %ax, 44(%rsi)
+; ALL-NEXT:    movw %ax, 46(%rsi)
+; ALL-NEXT:    movw %ax, 48(%rsi)
+; ALL-NEXT:    movw %ax, 50(%rsi)
+; ALL-NEXT:    movw %ax, 52(%rsi)
+; ALL-NEXT:    movw %ax, 54(%rsi)
+; ALL-NEXT:    movw %ax, 56(%rsi)
+; ALL-NEXT:    movw %ax, 58(%rsi)
+; ALL-NEXT:    movw %ax, 60(%rsi)
+; ALL-NEXT:    movw %ax, 62(%rsi)
+; ALL-NEXT:    retq
+  %in.elt.not = load i16, ptr %in.elt.ptr, align 64
+  %in.elt = xor i16 %in.elt.not, -1
+  %out.elt0.ptr = getelementptr i16, ptr %out.vec.ptr, i64 0
+  store i16 %in.elt, ptr %out.elt0.ptr, align 64
+  %out.elt1.ptr = getelementptr i16, ptr %out.vec.ptr, i64 1
+  store i16 %in.elt, ptr %out.elt1.ptr, align 2
+  %out.elt2.ptr = getelementptr i16, ptr %out.vec.ptr, i64 2
+  store i16 %in.elt, ptr %out.elt2.ptr, align 4
+  %out.elt3.ptr = getelementptr i16, ptr %out.vec.ptr, i64 3
+  store i16 %in.elt, ptr %out.elt3.ptr, align 2
+  %out.elt4.ptr = getelementptr i16, ptr %out.vec.ptr, i64 4
+  store i16 %in.elt, ptr %out.elt4.ptr, align 8
+  %out.elt5.ptr = getelementptr i16, ptr %out.vec.ptr, i64 5
+  store i16 %in.elt, ptr %out.elt5.ptr, align 2
+  %out.elt6.ptr = getelementptr i16, ptr %out.vec.ptr, i64 6
+  store i16 %in.elt, ptr %out.elt6.ptr, align 4
+  %out.elt7.ptr = getelementptr i16, ptr %out.vec.ptr, i64 7
+  store i16 %in.elt, ptr %out.elt7.ptr, align 2
+  %out.elt8.ptr = getelementptr i16, ptr %out.vec.ptr, i64 8
+  store i16 %in.elt, ptr %out.elt8.ptr, align 16
+  %out.elt9.ptr = getelementptr i16, ptr %out.vec.ptr, i64 9
+  store i16 %in.elt, ptr %out.elt9.ptr, align 2
+  %out.elt10.ptr = getelementptr i16, ptr %out.vec.ptr, i64 10
+  store i16 %in.elt, ptr %out.elt10.ptr, align 4
+  %out.elt11.ptr = getelementptr i16, ptr %out.vec.ptr, i64 11
+  store i16 %in.elt, ptr %out.elt11.ptr, align 2
+  %out.elt12.ptr = getelementptr i16, ptr %out.vec.ptr, i64 12
+  store i16 %in.elt, ptr %out.elt12.ptr, align 8
+  %out.elt13.ptr = getelementptr i16, ptr %out.vec.ptr, i64 13
+  store i16 %in.elt, ptr %out.elt13.ptr, align 2
+  %out.elt14.ptr = getelementptr i16, ptr %out.vec.ptr, i64 14
+  store i16 %in.elt, ptr %out.elt14.ptr, align 4
+  %out.elt15.ptr = getelementptr i16, ptr %out.vec.ptr, i64 15
+  store i16 %in.elt, ptr %out.elt15.ptr, align 2
+  %out.elt16.ptr = getelementptr i16, ptr %out.vec.ptr, i64 16
+  store i16 %in.elt, ptr %out.elt16.ptr, align 32
+  %out.elt17.ptr = getelementptr i16, ptr %out.vec.ptr, i64 17
+  store i16 %in.elt, ptr %out.elt17.ptr, align 2
+  %out.elt18.ptr = getelementptr i16, ptr %out.vec.ptr, i64 18
+  store i16 %in.elt, ptr %out.elt18.ptr, align 4
+  %out.elt19.ptr = getelementptr i16, ptr %out.vec.ptr, i64 19
+  store i16 %in.elt, ptr %out.elt19.ptr, align 2
+  %out.elt20.ptr = getelementptr i16, ptr %out.vec.ptr, i64 20
+  store i16 %in.elt, ptr %out.elt20.ptr, align 8
+  %out.elt21.ptr = getelementptr i16, ptr %out.vec.ptr, i64 21
+  store i16 %in.elt, ptr %out.elt21.ptr, align 2
+  %out.elt22.ptr = getelementptr i16, ptr %out.vec.ptr, i64 22
+  store i16 %in.elt, ptr %out.elt22.ptr, align 4
+  %out.elt23.ptr = getelementptr i16, ptr %out.vec.ptr, i64 23
+  store i16 %in.elt, ptr %out.elt23.ptr, align 2
+  %out.elt24.ptr = getelementptr i16, ptr %out.vec.ptr, i64 24
+  store i16 %in.elt, ptr %out.elt24.ptr, align 16
+  %out.elt25.ptr = getelementptr i16, ptr %out.vec.ptr, i64 25
+  store i16 %in.elt, ptr %out.elt25.ptr, align 2
+  %out.elt26.ptr = getelementptr i16, ptr %out.vec.ptr, i64 26
+  store i16 %in.elt, ptr %out.elt26.ptr, align 4
+  %out.elt27.ptr = getelementptr i16, ptr %out.vec.ptr, i64 27
+  store i16 %in.elt, ptr %out.elt27.ptr, align 2
+  %out.elt28.ptr = getelementptr i16, ptr %out.vec.ptr, i64 28
+  store i16 %in.elt, ptr %out.elt28.ptr, align 8
+  %out.elt29.ptr = getelementptr i16, ptr %out.vec.ptr, i64 29
+  store i16 %in.elt, ptr %out.elt29.ptr, align 2
+  %out.elt30.ptr = getelementptr i16, ptr %out.vec.ptr, i64 30
+  store i16 %in.elt, ptr %out.elt30.ptr, align 4
+  %out.elt31.ptr = getelementptr i16, ptr %out.vec.ptr, i64 31
+  store i16 %in.elt, ptr %out.elt31.ptr, align 2
+  ret void
+}
+
+define void @vec512_i32(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec512_i32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    movl (%rdi), %eax
+; ALL-NEXT:    notl %eax
+; ALL-NEXT:    movl %eax, (%rsi)
+; ALL-NEXT:    movl %eax, 4(%rsi)
+; ALL-NEXT:    movl %eax, 8(%rsi)
+; ALL-NEXT:    movl %eax, 12(%rsi)
+; ALL-NEXT:    movl %eax, 16(%rsi)
+; ALL-NEXT:    movl %eax, 20(%rsi)
+; ALL-NEXT:    movl %eax, 24(%rsi)
+; ALL-NEXT:    movl %eax, 28(%rsi)
+; ALL-NEXT:    movl %eax, 32(%rsi)
+; ALL-NEXT:    movl %eax, 36(%rsi)
+; ALL-NEXT:    movl %eax, 40(%rsi)
+; ALL-NEXT:    movl %eax, 44(%rsi)
+; ALL-NEXT:    movl %eax, 48(%rsi)
+; ALL-NEXT:    movl %eax, 52(%rsi)
+; ALL-NEXT:    movl %eax, 56(%rsi)
+; ALL-NEXT:    movl %eax, 60(%rsi)
+; ALL-NEXT:    retq
+  %in.elt.not = load i32, ptr %in.elt.ptr, align 64
+  %in.elt = xor i32 %in.elt.not, -1
+  %out.elt0.ptr = getelementptr i32, ptr %out.vec.ptr, i64 0
+  store i32 %in.elt, ptr %out.elt0.ptr, align 64
+  %out.elt1.ptr = getelementptr i32, ptr %out.vec.ptr, i64 1
+  store i32 %in.elt, ptr %out.elt1.ptr, align 4
+  %out.elt2.ptr = getelementptr i32, ptr %out.vec.ptr, i64 2
+  store i32 %in.elt, ptr %out.elt2.ptr, align 8
+  %out.elt3.ptr = getelementptr i32, ptr %out.vec.ptr, i64 3
+  store i32 %in.elt, ptr %out.elt3.ptr, align 4
+  %out.elt4.ptr = getelementptr i32, ptr %out.vec.ptr, i64 4
+  store i32 %in.elt, ptr %out.elt4.ptr, align 16
+  %out.elt5.ptr = getelementptr i32, ptr %out.vec.ptr, i64 5
+  store i32 %in.elt, ptr %out.elt5.ptr, align 4
+  %out.elt6.ptr = getelementptr i32, ptr %out.vec.ptr, i64 6
+  store i32 %in.elt, ptr %out.elt6.ptr, align 8
+  %out.elt7.ptr = getelementptr i32, ptr %out.vec.ptr, i64 7
+  store i32 %in.elt, ptr %out.elt7.ptr, align 4
+  %out.elt8.ptr = getelementptr i32, ptr %out.vec.ptr, i64 8
+  store i32 %in.elt, ptr %out.elt8.ptr, align 32
+  %out.elt9.ptr = getelementptr i32, ptr %out.vec.ptr, i64 9
+  store i32 %in.elt, ptr %out.elt9.ptr, align 4
+  %out.elt10.ptr = getelementptr i32, ptr %out.vec.ptr, i64 10
+  store i32 %in.elt, ptr %out.elt10.ptr, align 8
+  %out.elt11.ptr = getelementptr i32, ptr %out.vec.ptr, i64 11
+  store i32 %in.elt, ptr %out.elt11.ptr, align 4
+  %out.elt12.ptr = getelementptr i32, ptr %out.vec.ptr, i64 12
+  store i32 %in.elt, ptr %out.elt12.ptr, align 16
+  %out.elt13.ptr = getelementptr i32, ptr %out.vec.ptr, i64 13
+  store i32 %in.elt, ptr %out.elt13.ptr, align 4
+  %out.elt14.ptr = getelementptr i32, ptr %out.vec.ptr, i64 14
+  store i32 %in.elt, ptr %out.elt14.ptr, align 8
+  %out.elt15.ptr = getelementptr i32, ptr %out.vec.ptr, i64 15
+  store i32 %in.elt, ptr %out.elt15.ptr, align 4
+  ret void
+}
+
+define void @vec512_float(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec512_float:
+; ALL:       # %bb.0:
+; ALL-NEXT:    movl (%rdi), %eax
+; ALL-NEXT:    notl %eax
+; ALL-NEXT:    movl %eax, (%rsi)
+; ALL-NEXT:    movl %eax, 4(%rsi)
+; ALL-NEXT:    movl %eax, 8(%rsi)
+; ALL-NEXT:    movl %eax, 12(%rsi)
+; ALL-NEXT:    movl %eax, 16(%rsi)
+; ALL-NEXT:    movl %eax, 20(%rsi)
+; ALL-NEXT:    movl %eax, 24(%rsi)
+; ALL-NEXT:    movl %eax, 28(%rsi)
+; ALL-NEXT:    movl %eax, 32(%rsi)
+; ALL-NEXT:    movl %eax, 36(%rsi)
+; ALL-NEXT:    movl %eax, 40(%rsi)
+; ALL-NEXT:    movl %eax, 44(%rsi)
+; ALL-NEXT:    movl %eax, 48(%rsi)
+; ALL-NEXT:    movl %eax, 52(%rsi)
+; ALL-NEXT:    movl %eax, 56(%rsi)
+; ALL-NEXT:    movl %eax, 60(%rsi)
+; ALL-NEXT:    retq
+  %in.elt.not = load i32, ptr %in.elt.ptr, align 64
+  %in.elt.int = xor i32 %in.elt.not, -1
+  %in.elt = bitcast i32 %in.elt.int to float
+  %out.elt0.ptr = getelementptr float, ptr %out.vec.ptr, i64 0
+  store float %in.elt, ptr %out.elt0.ptr, align 64
+  %out.elt1.ptr = getelementptr float, ptr %out.vec.ptr, i64 1
+  store float %in.elt, ptr %out.elt1.ptr, align 4
+  %out.elt2.ptr = getelementptr float, ptr %out.vec.ptr, i64 2
+  store float %in.elt, ptr %out.elt2.ptr, align 8
+  %out.elt3.ptr = getelementptr float, ptr %out.vec.ptr, i64 3
+  store float %in.elt, ptr %out.elt3.ptr, align 4
+  %out.elt4.ptr = getelementptr float, ptr %out.vec.ptr, i64 4
+  store float %in.elt, ptr %out.elt4.ptr, align 16
+  %out.elt5.ptr = getelementptr float, ptr %out.vec.ptr, i64 5
+  store float %in.elt, ptr %out.elt5.ptr, align 4
+  %out.elt6.ptr = getelementptr float, ptr %out.vec.ptr, i64 6
+  store float %in.elt, ptr %out.elt6.ptr, align 8
+  %out.elt7.ptr = getelementptr float, ptr %out.vec.ptr, i64 7
+  store float %in.elt, ptr %out.elt7.ptr, align 4
+  %out.elt8.ptr = getelementptr float, ptr %out.vec.ptr, i64 8
+  store float %in.elt, ptr %out.elt8.ptr, align 32
+  %out.elt9.ptr = getelementptr float, ptr %out.vec.ptr, i64 9
+  store float %in.elt, ptr %out.elt9.ptr, align 4
+  %out.elt10.ptr = getelementptr float, ptr %out.vec.ptr, i64 10
+  store float %in.elt, ptr %out.elt10.ptr, align 8
+  %out.elt11.ptr = getelementptr float, ptr %out.vec.ptr, i64 11
+  store float %in.elt, ptr %out.elt11.ptr, align 4
+  %out.elt12.ptr = getelementptr float, ptr %out.vec.ptr, i64 12
+  store float %in.elt, ptr %out.elt12.ptr, align 16
+  %out.elt13.ptr = getelementptr float, ptr %out.vec.ptr, i64 13
+  store float %in.elt, ptr %out.elt13.ptr, align 4
+  %out.elt14.ptr = getelementptr float, ptr %out.vec.ptr, i64 14
+  store float %in.elt, ptr %out.elt14.ptr, align 8
+  %out.elt15.ptr = getelementptr float, ptr %out.vec.ptr, i64 15
+  store float %in.elt, ptr %out.elt15.ptr, align 4
+  ret void
+}
+
+define void @vec512_i64(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec512_i64:
+; ALL:       # %bb.0:
+; ALL-NEXT:    movq (%rdi), %rax
+; ALL-NEXT:    notq %rax
+; ALL-NEXT:    movq %rax, (%rsi)
+; ALL-NEXT:    movq %rax, 8(%rsi)
+; ALL-NEXT:    movq %rax, 16(%rsi)
+; ALL-NEXT:    movq %rax, 24(%rsi)
+; ALL-NEXT:    movq %rax, 32(%rsi)
+; ALL-NEXT:    movq %rax, 40(%rsi)
+; ALL-NEXT:    movq %rax, 48(%rsi)
+; ALL-NEXT:    movq %rax, 56(%rsi)
+; ALL-NEXT:    retq
+  %in.elt.not = load i64, ptr %in.elt.ptr, align 64
+  %in.elt = xor i64 %in.elt.not, -1
+  %out.elt0.ptr = getelementptr i64, ptr %out.vec.ptr, i64 0
+  store i64 %in.elt, ptr %out.elt0.ptr, align 64
+  %out.elt1.ptr = getelementptr i64, ptr %out.vec.ptr, i64 1
+  store i64 %in.elt, ptr %out.elt1.ptr, align 8
+  %out.elt2.ptr = getelementptr i64, ptr %out.vec.ptr, i64 2
+  store i64 %in.elt, ptr %out.elt2.ptr, align 16
+  %out.elt3.ptr = getelementptr i64, ptr %out.vec.ptr, i64 3
+  store i64 %in.elt, ptr %out.elt3.ptr, align 8
+  %out.elt4.ptr = getelementptr i64, ptr %out.vec.ptr, i64 4
+  store i64 %in.elt, ptr %out.elt4.ptr, align 32
+  %out.elt5.ptr = getelementptr i64, ptr %out.vec.ptr, i64 5
+  store i64 %in.elt, ptr %out.elt5.ptr, align 8
+  %out.elt6.ptr = getelementptr i64, ptr %out.vec.ptr, i64 6
+  store i64 %in.elt, ptr %out.elt6.ptr, align 16
+  %out.elt7.ptr = getelementptr i64, ptr %out.vec.ptr, i64 7
+  store i64 %in.elt, ptr %out.elt7.ptr, align 8
+  ret void
+}
+
+define void @vec512_double(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec512_double:
+; ALL:       # %bb.0:
+; ALL-NEXT:    movq (%rdi), %rax
+; ALL-NEXT:    notq %rax
+; ALL-NEXT:    movq %rax, (%rsi)
+; ALL-NEXT:    movq %rax, 8(%rsi)
+; ALL-NEXT:    movq %rax, 16(%rsi)
+; ALL-NEXT:    movq %rax, 24(%rsi)
+; ALL-NEXT:    movq %rax, 32(%rsi)
+; ALL-NEXT:    movq %rax, 40(%rsi)
+; ALL-NEXT:    movq %rax, 48(%rsi)
+; ALL-NEXT:    movq %rax, 56(%rsi)
+; ALL-NEXT:    retq
+  %in.elt.not = load i64, ptr %in.elt.ptr, align 64
+  %in.elt.int = xor i64 %in.elt.not, -1
+  %in.elt = bitcast i64 %in.elt.int to double
+  %out.elt0.ptr = getelementptr i64, ptr %out.vec.ptr, i64 0
+  store double %in.elt, ptr %out.elt0.ptr, align 64
+  %out.elt1.ptr = getelementptr i64, ptr %out.vec.ptr, i64 1
+  store double %in.elt, ptr %out.elt1.ptr, align 8
+  %out.elt2.ptr = getelementptr i64, ptr %out.vec.ptr, i64 2
+  store double %in.elt, ptr %out.elt2.ptr, align 16
+  %out.elt3.ptr = getelementptr i64, ptr %out.vec.ptr, i64 3
+  store double %in.elt, ptr %out.elt3.ptr, align 8
+  %out.elt4.ptr = getelementptr i64, ptr %out.vec.ptr, i64 4
+  store double %in.elt, ptr %out.elt4.ptr, align 32
+  %out.elt5.ptr = getelementptr i64, ptr %out.vec.ptr, i64 5
+  store double %in.elt, ptr %out.elt5.ptr, align 8
+  %out.elt6.ptr = getelementptr i64, ptr %out.vec.ptr, i64 6
+  store double %in.elt, ptr %out.elt6.ptr, align 16
+  %out.elt7.ptr = getelementptr i64, ptr %out.vec.ptr, i64 7
+  store double %in.elt, ptr %out.elt7.ptr, align 8
+  ret void
+}
+
+define void @vec512_i128(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec512_i128:
+; ALL:       # %bb.0:
+; ALL-NEXT:    movq (%rdi), %rax
+; ALL-NEXT:    movq 8(%rdi), %rcx
+; ALL-NEXT:    notq %rcx
+; ALL-NEXT:    notq %rax
+; ALL-NEXT:    movq %rax, (%rsi)
+; ALL-NEXT:    movq %rcx, 8(%rsi)
+; ALL-NEXT:    movq %rcx, 24(%rsi)
+; ALL-NEXT:    movq %rax, 16(%rsi)
+; ALL-NEXT:    movq %rcx, 40(%rsi)
+; ALL-NEXT:    movq %rax, 32(%rsi)
+; ALL-NEXT:    movq %rcx, 56(%rsi)
+; ALL-NEXT:    movq %rax, 48(%rsi)
+; ALL-NEXT:    retq
+  %in.elt.not = load i128, ptr %in.elt.ptr, align 64
+  %in.elt = xor i128 %in.elt.not, -1
+  %out.elt0.ptr = getelementptr i128, ptr %out.vec.ptr, i64 0
+  store i128 %in.elt, ptr %out.elt0.ptr, align 64
+  %out.elt1.ptr = getelementptr i128, ptr %out.vec.ptr, i64 1
+  store i128 %in.elt, ptr %out.elt1.ptr, align 16
+  %out.elt2.ptr = getelementptr i128, ptr %out.vec.ptr, i64 2
+  store i128 %in.elt, ptr %out.elt2.ptr, align 32
+  %out.elt3.ptr = getelementptr i128, ptr %out.vec.ptr, i64 3
+  store i128 %in.elt, ptr %out.elt3.ptr, align 16
+  ret void
+}
+
+define void @vec512_i256(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
+; ALL-LABEL: vec512_i256:
+; ALL:       # %bb.0:
+; ALL-NEXT:    movq 16(%rdi), %rax
+; ALL-NEXT:    movq 24(%rdi), %rcx
+; ALL-NEXT:    movq (%rdi), %rdx
+; ALL-NEXT:    movq 8(%rdi), %rdi
+; ALL-NEXT:    notq %rdi
+; ALL-NEXT:    notq %rdx
+; ALL-NEXT:    notq %rcx
+; ALL-NEXT:    notq %rax
+; ALL-NEXT:    movq %rax, 16(%rsi)
+; ALL-NEXT:    movq %rcx, 24(%rsi)
+; ALL-NEXT:    movq %rdx, (%rsi)
+; ALL-NEXT:    movq %rdi, 8(%rsi)
+; ALL-NEXT:    movq %rax, 48(%rsi)
+; ALL-NEXT:    movq %rcx, 56(%rsi)
+; ALL-NEXT:    movq %rdx, 32(%rsi)
+; ALL-NEXT:    movq %rdi, 40(%rsi)
+; ALL-NEXT:    retq
+  %in.elt.not = load i256, ptr %in.elt.ptr, align 64
+  %in.elt = xor i256 %in.elt.not, -1
+  %out.elt0.ptr = getelementptr i256, ptr %out.vec.ptr, i64 0
+  store i256 %in.elt, ptr %out.elt0.ptr, align 64
+  %out.elt1.ptr = getelementptr i256, ptr %out.vec.ptr, i64 1
+  store i256 %in.elt, ptr %out.elt1.ptr, align 32
+  ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; AVX: {{.*}}
+; AVX1: {{.*}}
+; AVX2: {{.*}}
+; AVX512: {{.*}}
+; AVX512BW: {{.*}}
+; AVX512F: {{.*}}
+; SSE: {{.*}}
+; SSE2: {{.*}}
+; SSE3: {{.*}}
+; SSE41: {{.*}}
+; SSE42: {{.*}}
+; SSSE3: {{.*}}
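A closing observation (again an aside, not from the commit): in the i8 case the stored bytes are all identical, so the 64 scalar stores are also equivalent to a memset of the output buffer. A minimal sketch of that form, with a hypothetical function name:

    declare void @llvm.memset.p0.i64(ptr, i8, i64, i1)

    define void @vec512_i8_memset(ptr %in.elt.ptr, ptr %out.vec.ptr) nounwind {
      %in.elt.not = load i8, ptr %in.elt.ptr, align 64
      %in.elt = xor i8 %in.elt.not, -1
      ; 64 identical byte stores collapse into one memset of the 64-byte buffer.
      call void @llvm.memset.p0.i64(ptr align 64 %out.vec.ptr, i8 %in.elt, i64 64, i1 false)
      ret void
    }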